From ab0dda941c8cf586896508b6c9b80c48a9b9d8fa Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 28 Sep 2022 16:30:11 -0600 Subject: [PATCH 01/92] clean up --- metplus/util/config_metplus.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py index feea916bc2..4c776565d7 100644 --- a/metplus/util/config_metplus.py +++ b/metplus/util/config_metplus.py @@ -1492,17 +1492,19 @@ def find_indices_in_config_section(regex, config, sec='config', regex = re.compile(regex) for conf in all_conf: result = regex.match(conf) - if result is not None: - index = result.group(index_index) - if id_index: - identifier = result.group(id_index) - else: - identifier = None + if result is None: + continue - if index not in indices: - indices[index] = [identifier] - else: - indices[index].append(identifier) + index = result.group(index_index) + if id_index: + identifier = result.group(id_index) + else: + identifier = None + + if index not in indices: + indices[index] = [identifier] + else: + indices[index].append(identifier) return indices From 04db77bb3bca135a8c446d266c29813e6a61874e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 28 Sep 2022 16:30:47 -0600 Subject: [PATCH 02/92] per #1842, add support for setting multiple jobs for StatAnalysis wrapper using STAT_ANALYSIS_JOB, clean up indentation --- metplus/wrappers/stat_analysis_wrapper.py | 183 ++++++++++-------- .../StatAnalysis/StatAnalysis.conf | 3 +- 2 files changed, 103 insertions(+), 83 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index de313014ca..003efbd2b9 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -67,51 +67,53 @@ class StatAnalysisWrapper(CommandBuilder): 'METPLUS_HSS_EC_VALUE', ] - 
field_lists = ['FCST_VAR_LIST', - 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', - 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', - 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', - ] - - format_lists = ['FCST_VALID_HOUR_LIST', - 'FCST_INIT_HOUR_LIST', - 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', - 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', - ] + field_lists = [ + 'FCST_VAR_LIST', + 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', + 'OBS_UNITS_LIST', + 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', + 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', + ] + + format_lists = [ + 'FCST_VALID_HOUR_LIST', + 'FCST_INIT_HOUR_LIST', + 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', + 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', + ] - expected_config_lists = ['MODEL_LIST', - 'DESC_LIST', - 'VX_MASK_LIST', - 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', - 'COV_THRESH_LIST', - 'ALPHA_LIST', - 'LINE_TYPE_LIST', - ] + format_lists + field_lists - - force_group_for_make_plots_lists = ['MODEL_LIST', - 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', - 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', - 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', - 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', - ] + expected_config_lists = [ + 'MODEL_LIST', + 'DESC_LIST', + 'VX_MASK_LIST', + 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', + 'COV_THRESH_LIST', + 'ALPHA_LIST', + 'LINE_TYPE_LIST', + ] + format_lists + field_lists + + force_group_for_make_plots_lists = [ + 'MODEL_LIST', + 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', + 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', + 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', + 'FCST_UNITS_LIST', + 'OBS_UNITS_LIST', + ] list_categories = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS'] - list_categories_make_plots = ['GROUP_LIST_ITEMS_MAKE_PLOTS', 'LOOP_LIST_ITEMS_MAKE_PLOTS'] - # what is the used for? 
these are not formatted later - format_later_list = [ - 'MODEL_LIST', 'FCST_VALID_HOUR_LIST', 'OBS_VALID_HOUR_LIST', - 'FCST_INIT_HOUR_LIST', 'OBS_INIT_HOUR_LIST' + list_categories_make_plots = [ + 'GROUP_LIST_ITEMS_MAKE_PLOTS', + 'LOOP_LIST_ITEMS_MAKE_PLOTS' ] def __init__(self, config, instance=None): @@ -141,7 +143,7 @@ def get_command(self): cmd += f" -out {self.c_dict['OUTPUT_FILENAME']}" return cmd - + def create_c_dict(self): """! Create a data structure (dictionary) that contains all the values set in the configuration files that are common for @@ -158,7 +160,6 @@ def create_c_dict(self): self.config.getstr('config', 'LOG_STAT_ANALYSIS_VERBOSITY', c_dict['VERBOSITY']) ) - c_dict['LOOP_ORDER'] = self.config.getstr('config', 'LOOP_ORDER') # STATAnalysis config file is optional, so # don't provide wrapped config file name as default value @@ -186,10 +187,22 @@ def create_c_dict(self): c_dict['DATE_BEG'] = start_dt.strftime('%Y%m%d') c_dict['DATE_END'] = end_dt.strftime('%Y%m%d') - for job_conf in ['JOB_NAME', 'JOB_ARGS']: - c_dict[job_conf] = self.config.getstr('config', - f'STAT_ANALYSIS_{job_conf}', - '') + # read jobs from STAT_ANALYSIS_JOB or legacy JOB_NAME/ARGS if unset + c_dict['JOBS'] = [] + job_indices = list( + find_indices_in_config_section(r'STAT_ANALYSIS_JOB(\d+)$', + self.config, + index_index=1).keys() + ) + + if job_indices: + for j_id in job_indices: + job = self.config.getraw('config', f'STAT_ANALYSIS_JOB{j_id}') + c_dict['JOBS'].append(job) + else: + job_name = self.config.getraw('config', 'STAT_ANALYSIS_JOB_NAME') + job_args = self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS') + c_dict['JOBS'].append(f'-job {job_name} {job_args}') # read in all lists except field lists, which will be read in afterwards and checked all_lists_to_read = self.expected_config_lists + self.list_categories @@ -249,17 +262,25 @@ def create_c_dict(self): def c_dict_error_check(self, c_dict): if not c_dict.get('CONFIG_FILE'): - 
self.logger.info("STAT_ANALYSIS_CONFIG_FILE not set. Passing job arguments to " - "stat_analysis directly on the command line. This will bypass " - "any filtering done unless you add the arguments to " - "STAT_ANALYSIS_JOB_ARGS") + if len(c_dict['JOBS']) > 1: + self.log_error( + 'Only 1 job can be set with STAT_ANALYSIS_JOB if ' + 'STAT_ANALYSIS_CONFIG_FILE is not set.' + ) + else: + self.logger.info("STAT_ANALYSIS_CONFIG_FILE not set. Passing " + "job arguments to stat_analysis directly on " + "the command line. This will bypass " + "any filtering done unless you add the " + "arguments to STAT_ANALYSIS_JOBS") if not c_dict['OUTPUT_DIR']: self.log_error("Must set STAT_ANALYSIS_OUTPUT_DIR") - for job_conf in ['JOB_NAME', 'JOB_ARGS']: - if not c_dict[job_conf]: - self.log_error(f"Must set STAT_ANALYSIS_{job_conf} to run StatAnalysis") + if not c_dict['JOBS']: + self.log_error( + "Must set at least one job with STAT_ANALYSIS_JOB" + ) for conf_list in self.list_categories: if not c_dict[conf_list]: @@ -299,8 +320,6 @@ def c_dict_error_check(self, c_dict): if len(c_dict['MODEL_LIST']) > 8: self.log_error("Number of models for plotting limited to 8.") -# self.check_dump_row_templates_for_plotting() - # set forMakePlots to False to begin. When gathering settings to # send to MakePlots wrapper, this will be set to True self.forMakePlots = False @@ -383,8 +402,9 @@ def check_MakePlots_config(self, c_dict): +"StatAnalysis followed by MakePlots.") # if MakePlots is run but -dump_row is not found in the job args, error - if '-dump_row' not in c_dict['JOB_ARGS']: - self.log_error("Must include -dump_row in STAT_ANALYSIS_JOB_ARGS if running MakePlots") + if not any([item for item in c_dict['JOBS'] if '-dump_row' in item]): + self.log_error("Must include -dump_row in at least one " + "STAT_ANALYSIS_JOB if running MakePlots") def list_to_str(self, list_of_values, add_quotes=True): """! 
Turn a list of values into a single string so it can be @@ -436,7 +456,7 @@ def set_lists_loop_or_group(self, c_dict): for missing_config in missing_config_list: # if running MakePlots - if (c_dict['LOOP_ORDER'] == 'processes' and self.runMakePlots): + if self.runMakePlots: # if LINE_TYPE_LIST is missing, add it to group list if missing_config == 'LINE_TYPE_LIST': @@ -1365,7 +1385,7 @@ def process_job_args(self, job_type, job, model_info, output_file = os.path.join(self.c_dict['OUTPUT_DIR'], output_filename) - # substitute output filename in JOB_ARGS line + # substitute output filename in JOBS line job = job.replace(f'[{job_type}_file]', output_file) job = job.replace(f'[{job_type}_filename]', output_file) @@ -1400,11 +1420,10 @@ def get_runtime_settings_dict_list(self): if model_info is None: return None - runtime_settings_dict['JOB'] = self.get_job_info(model_info, - runtime_settings_dict, - loop_lists, - group_lists, - ) + runtime_settings_dict['JOBS'] = ( + self.get_job_info(model_info, runtime_settings_dict, + loop_lists, group_lists) + ) # get -out argument if set if self.c_dict['OUTPUT_TEMPLATE']: @@ -1469,7 +1488,6 @@ def get_runtime_settings(self, c_dict): for loop_list in loop_lists: # if not a threshold list, add quotes around each value in list - # if loop_list not in self.format_later_list and 'THRESH' not in loop_list: if 'THRESH' not in loop_list: c_dict[loop_list] = [f'"{value}"' for value in c_dict[loop_list]] @@ -1700,18 +1718,21 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists, group_list @params runtime_settings_dict dictionary containing all settings used in next run @returns string containing job information to pass to StatAnalysis config file """ - job = '-job ' + self.c_dict['JOB_NAME'] + ' ' + self.c_dict['JOB_ARGS'] - for job_type in ['dump_row', 'out_stat']: - if f"-{job_type}" in self.c_dict['JOB_ARGS']: - job = self.process_job_args(job_type, - job, - model_info, - loop_lists, - group_lists, - 
runtime_settings_dict, - ) - - return job + jobs = [] + for job in self.c_dict['JOBS']: + for job_type in ['dump_row', 'out_stat']: + if f"-{job_type}" in job: + job = self.process_job_args(job_type, + job, + model_info, + loop_lists, + group_lists, + runtime_settings_dict, + ) + + jobs.append(job) + + return jobs def run_stat_analysis(self): """! This runs stat_analysis over a period of valid @@ -1811,7 +1832,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): self.env_var_dict[f'METPLUS_{mp_item}'] = value value = f'jobs = ["' - value += runtime_settings_dict.get('JOB', '') + value += '","'.join(runtime_settings_dict['JOBS']) value += '"];' self.env_var_dict[f'METPLUS_JOBS'] = value @@ -1821,7 +1842,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): # set lookin dir self.logger.debug(f"Setting -lookin dir to {runtime_settings_dict['LOOKIN_DIR']}") self.lookindir = runtime_settings_dict['LOOKIN_DIR'] - self.job_args = runtime_settings_dict['JOB'] + self.job_args = runtime_settings_dict['JOBS'][0] # set -out file path if requested, value will be set to None if not self.c_dict['OUTPUT_FILENAME'] = ( diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf index 67f5f0b4bd..cbcc88d7a8 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf @@ -61,8 +61,7 @@ STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped #STAT_ANALYSIS_HSS_EC_VALUE = -STAT_ANALYSIS_JOB_NAME = filter -STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file] +STAT_ANALYSIS_JOB1 = -job filter -dump_row [dump_row_file] MODEL_LIST = {MODEL1} DESC_LIST = From 8c5983d95228a11fd1928fd60696c311eafb7aff Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 29 Sep 2022 12:44:55 -0600 Subject: [PATCH 03/92] removed test for LOOP_ORDER --- 
.../tests/pytests/wrappers/stat_analysis/test_stat_analysis.py | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 054e167bc5..8a5755d263 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -54,7 +54,6 @@ def test_create_c_dict(metplus_config): st = stat_analysis_wrapper(metplus_config) # Test 1 c_dict = st.create_c_dict() - assert c_dict['LOOP_ORDER'] == 'times' assert(os.path.realpath(c_dict['CONFIG_FILE']) == (METPLUS_BASE+'/internal/tests/' +'config/STATAnalysisConfig')) assert(c_dict['OUTPUT_DIR'] == (st.config.getdir('OUTPUT_BASE') From 4e7db5fcb77be86128b4d99aa69fe164d1f5ade1 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 29 Sep 2022 12:45:18 -0600 Subject: [PATCH 04/92] skip setting env var for JOBS since it is now a list instead of a string and not used in the plotting scripts --- metplus/wrappers/make_plots_wrapper.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metplus/wrappers/make_plots_wrapper.py b/metplus/wrappers/make_plots_wrapper.py index 29bbcc8aac..08716e4d81 100755 --- a/metplus/wrappers/make_plots_wrapper.py +++ b/metplus/wrappers/make_plots_wrapper.py @@ -280,6 +280,8 @@ def create_plots(self, runtime_settings_dict_list): for runtime_settings_dict in runtime_settings_dict_list: # set environment variables for name, value in runtime_settings_dict.items(): + if name == 'JOBS': + continue self.add_env_var(name, value.replace('"', '')) for key in self.add_from_c_dict_list: From 5b007f991d2989a6f6226d9bfd74c6711b7bcd8e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 29 Sep 2022 19:33:54 -0600 Subject: [PATCH 05/92] fixed broken unit test by using wrapped StatAnalysis config file 
instead of using out-of-date file that should no longer be used --- internal/tests/pytests/wrappers/stat_analysis/test.conf | 3 ++- .../tests/pytests/wrappers/stat_analysis/test_plotting.conf | 2 +- .../pytests/wrappers/stat_analysis/test_stat_analysis.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test.conf b/internal/tests/pytests/wrappers/stat_analysis/test.conf index de84a88976..6bdcc276da 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test.conf +++ b/internal/tests/pytests/wrappers/stat_analysis/test.conf @@ -45,7 +45,8 @@ MODEL1 = MODEL_TEST MODEL1_REFERENCE_NAME = MODELTEST MODEL1_OBTYPE = MODEL_TEST_ANL -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig +STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped + # stat_analysis job info STAT_ANALYSIS_JOB_NAME = filter # if using -dump_row, put in JOBS_ARGS "-dump_row [dump_row_file]" diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf b/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf index d0b462f43a..09a5267381 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf +++ b/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf @@ -70,7 +70,7 @@ BOTH_VAR1_LEVELS = P1000, P850 #FCST_VAR1_NAME = HGT #FCST_VAR1_LEVELS = P1000, P850 -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig +STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped STAT_ANALYSIS_JOB_NAME = filter STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file] diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 8a5755d263..0627a81a3c 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -54,8 +54,8 @@ def 
test_create_c_dict(metplus_config): st = stat_analysis_wrapper(metplus_config) # Test 1 c_dict = st.create_c_dict() - assert(os.path.realpath(c_dict['CONFIG_FILE']) == (METPLUS_BASE+'/internal/tests/' - +'config/STATAnalysisConfig')) + assert(os.path.realpath(c_dict['CONFIG_FILE']) == (METPLUS_BASE+'/parm/met_config/' + +'STATAnalysisConfig_wrapped')) assert(c_dict['OUTPUT_DIR'] == (st.config.getdir('OUTPUT_BASE') +'/stat_analysis')) assert 'FCST_INIT_HOUR_LIST' in c_dict['GROUP_LIST_ITEMS'] From c325ed99d04985bf444c52b51a258f2f121f716a Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 29 Sep 2022 19:37:36 -0600 Subject: [PATCH 06/92] remove files that should be generated by tests if they already exists to prevent tests from passing when they should not --- .../pytests/wrappers/stat_analysis/test_stat_analysis.py | 2 ++ .../wrappers/stat_analysis/test_stat_analysis_plotting.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 0627a81a3c..35e0f7a700 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -727,6 +727,8 @@ def test_run_stat_analysis(metplus_config): # Test 1 expected_filename = (st.config.getdir('OUTPUT_BASE')+'/stat_analysis' +'/00Z/MODEL_TEST/MODEL_TEST_20190101.stat') + if os.path.exists(expected_filename): + os.remove(expected_filename) comparison_filename = (METPLUS_BASE+'/internal/tests/data/stat_data/' +'test_20190101.stat') st.c_dict['DATE_BEG'] = '20190101' diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py index c7452295ec..ef6bcd72de 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py 
+++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py @@ -383,6 +383,12 @@ def test_filter_for_plotting(metplus_config): expected_filename27, expected_filename28, expected_filename29, expected_filename30, expected_filename31, expected_filename32 ] + + # remove expected files before running + for expected_filename in expected_filename_list: + if os.path.exists(expected_filename): + os.remove(expected_filename) + st.c_dict['DATE_TYPE'] = 'VALID' st.c_dict['VALID_BEG'] = '20190101' st.c_dict['VALID_END'] = '20190101' From a44c866ef4ace855e69f3a74bdff05d90cf23815 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 06:29:20 -0600 Subject: [PATCH 07/92] remove LOOP_ORDER from MakePlots wrapper and tests --- .../tests/pytests/plotting/make_plots/test_make_plots.conf | 4 +--- .../pytests/plotting/make_plots/test_make_plots_wrapper.py | 2 +- metplus/wrappers/make_plots_wrapper.py | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf index 4bad57f747..030ee55166 100644 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf +++ b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf @@ -5,8 +5,6 @@ STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config [config] # LOOP_METHOD must be set to processes for plotting @@ -66,7 +64,7 @@ FCST_VAR1_LEVELS = P1000, P850 OBS_VAR1_NAME = HGT OBS_VAR1_LEVELS = P1000, P850 -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig +STAT_ANALYSIS_CONFIG_FILE = 
{PARM_BASE}/met_config/STATAnalysisConfig_wrapped # REQUIRED LISTS MODEL_LIST = {MODEL1}, {MODEL2} diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py index 5ee62c781a..4d67990a90 100644 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py +++ b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py @@ -47,7 +47,7 @@ def test_create_c_dict(metplus_config): mp = make_plots_wrapper(metplus_config) # Test 1 c_dict = mp.create_c_dict() - assert(c_dict['LOOP_ORDER'] == 'processes') + # NOTE: MakePlots relies on output from StatAnalysis # so its input resides in the output of StatAnalysis assert(c_dict['INPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE') diff --git a/metplus/wrappers/make_plots_wrapper.py b/metplus/wrappers/make_plots_wrapper.py index 08716e4d81..7c1652faa5 100755 --- a/metplus/wrappers/make_plots_wrapper.py +++ b/metplus/wrappers/make_plots_wrapper.py @@ -105,7 +105,6 @@ def create_c_dict(self): self.config.getstr('config', 'LOG_MAKE_PLOTS_VERBOSITY', c_dict['VERBOSITY']) ) - c_dict['LOOP_ORDER'] = self.config.getstr('config', 'LOOP_ORDER') c_dict['INPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_INPUT_DIR') c_dict['OUTPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_OUTPUT_DIR') c_dict['SCRIPTS_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_SCRIPTS_DIR') From 08d621fc3bf6355c61e3886590757f1c33cf2ed8 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 06:32:40 -0600 Subject: [PATCH 08/92] Revert "remove LOOP_ORDER from MakePlots wrapper and tests" This reverts commit a44c866ef4ace855e69f3a74bdff05d90cf23815. 
--- .../tests/pytests/plotting/make_plots/test_make_plots.conf | 4 +++- .../pytests/plotting/make_plots/test_make_plots_wrapper.py | 2 +- metplus/wrappers/make_plots_wrapper.py | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf index 030ee55166..4bad57f747 100644 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf +++ b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf @@ -5,6 +5,8 @@ STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots +# Location of configuration files used by MET applications +CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config [config] # LOOP_METHOD must be set to processes for plotting @@ -64,7 +66,7 @@ FCST_VAR1_LEVELS = P1000, P850 OBS_VAR1_NAME = HGT OBS_VAR1_LEVELS = P1000, P850 -STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped +STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig # REQUIRED LISTS MODEL_LIST = {MODEL1}, {MODEL2} diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py index 4d67990a90..5ee62c781a 100644 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py +++ b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py @@ -47,7 +47,7 @@ def test_create_c_dict(metplus_config): mp = make_plots_wrapper(metplus_config) # Test 1 c_dict = mp.create_c_dict() - + assert(c_dict['LOOP_ORDER'] == 'processes') # NOTE: MakePlots relies on output from StatAnalysis # so its input resides in the output of StatAnalysis assert(c_dict['INPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE') diff --git 
a/metplus/wrappers/make_plots_wrapper.py b/metplus/wrappers/make_plots_wrapper.py index 7c1652faa5..08716e4d81 100755 --- a/metplus/wrappers/make_plots_wrapper.py +++ b/metplus/wrappers/make_plots_wrapper.py @@ -105,6 +105,7 @@ def create_c_dict(self): self.config.getstr('config', 'LOG_MAKE_PLOTS_VERBOSITY', c_dict['VERBOSITY']) ) + c_dict['LOOP_ORDER'] = self.config.getstr('config', 'LOOP_ORDER') c_dict['INPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_INPUT_DIR') c_dict['OUTPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_OUTPUT_DIR') c_dict['SCRIPTS_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_SCRIPTS_DIR') From 88f1fb8090cdfbcf4e61bfecd31e5ca821d3e830 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 06:36:17 -0600 Subject: [PATCH 09/92] per #1772, set verbosity in stat_analysis command --- metplus/wrappers/stat_analysis_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 003efbd2b9..73283e6a13 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -124,7 +124,7 @@ def __init__(self, config, instance=None): def get_command(self): - cmd = self.app_path + cmd = f"{self.app_path} -v {self.c_dict['VERBOSITY']}" if self.args: cmd += ' ' + ' '.join(self.args) From 3fc2aeeedff0a48e0ca02a1d952d6e75e12233cb Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 10:37:29 -0600 Subject: [PATCH 10/92] per #1772, added verbosity to expected command --- .../tests/pytests/wrappers/stat_analysis/test_stat_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 35e0f7a700..84a590f223 100644 --- 
a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -36,7 +36,7 @@ def test_get_command(metplus_config): # Test 1 expected_command = ( st.config.getdir('MET_BIN_DIR', '') - +'/stat_analysis ' + +'/stat_analysis -v 2 ' +'-lookin /path/to/lookin_dir ' +'-config /path/to/STATAnalysisConfig' ) From 3ec43d06ff0c6de0c76b93dd982673cd6c9e4c91 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 10:38:41 -0600 Subject: [PATCH 11/92] added test for value found in existing use case, clean up formatting --- .../util/string_manip/test_util_string_manip.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index f218415837..8c9c1b694c 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -121,23 +121,28 @@ def test_getlist_int(): ['2']), ('begin_end_incr(0,2,1), begin_end_incr(3,9,3)', - ['0','1','2','3','6','9']), + ['0', '1', '2', '3', '6', '9']), ('mem_begin_end_incr(0,2,1), mem_begin_end_incr(3,9,3)', - ['mem_0','mem_1','mem_2','mem_3','mem_6','mem_9']), + ['mem_0', 'mem_1', 'mem_2', 'mem_3', 'mem_6', 'mem_9']), ('mem_begin_end_incr(0,2,1,3), mem_begin_end_incr(3,12,3,3)', - ['mem_000', 'mem_001', 'mem_002', 'mem_003', 'mem_006', 'mem_009', 'mem_012']), + ['mem_000', 'mem_001', 'mem_002', 'mem_003', + 'mem_006', 'mem_009', 'mem_012']), - ('begin_end_incr(0,10,2)H, 12', [ '0H', '2H', '4H', '6H', '8H', '10H', '12']), + ('begin_end_incr(0,10,2)H, 12', + ['0H', '2H', '4H', '6H', '8H', '10H', '12']), - ('begin_end_incr(0,10800,3600)S, 4H', [ '0S', '3600S', '7200S', '10800S', '4H']), + ('begin_end_incr(0,10800,3600)S, 4H', + ['0S', '3600S', '7200S', 
'10800S', '4H']), ('data.{init?fmt=%Y%m%d%H?shift=begin_end_incr(0, 3, 3)H}.ext', ['data.{init?fmt=%Y%m%d%H?shift=0H}.ext', 'data.{init?fmt=%Y%m%d%H?shift=3H}.ext', ]), - + ('"%m:begin_end_incr(3,11,1)", "%m%d:0229"', + ['%m:3', '%m:4', '%m:5', '%m:6', '%m:7', '%m:8', '%m:9', '%m:10', + '%m:11', '%m%d:0229']) ] ) @pytest.mark.util From 09d483913e240b42fca7f910c53c47af946b3475 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 10:41:05 -0600 Subject: [PATCH 12/92] moved stat_analysis tests to new pytest group wrapper_d --- .github/parm/pytest_groups.txt | 1 + internal/tests/pytests/pytest.ini | 1 + .../stat_analysis/test_stat_analysis.py | 186 ++++++++++-------- 3 files changed, 106 insertions(+), 82 deletions(-) diff --git a/.github/parm/pytest_groups.txt b/.github/parm/pytest_groups.txt index 374b99da80..48bf9bd415 100644 --- a/.github/parm/pytest_groups.txt +++ b/.github/parm/pytest_groups.txt @@ -3,4 +3,5 @@ wrapper wrapper_a wrapper_b wrapper_c +wrapper_d plotting_or_long diff --git a/internal/tests/pytests/pytest.ini b/internal/tests/pytests/pytest.ini index 8630509ec0..e9f3dd09e8 100644 --- a/internal/tests/pytests/pytest.ini +++ b/internal/tests/pytests/pytest.ini @@ -4,6 +4,7 @@ markers = wrapper_a: custom marker for testing metplus/wrapper logic - A group wrapper_b: custom marker for testing metplus/wrapper logic - B group wrapper_c: custom marker for testing metplus/wrapper logic - C group + wrapper_d: custom marker for testing metplus/wrapper logic - D group wrapper: custom marker for testing metplus/wrapper logic - all others long: custom marker for tests that take a long time to run plotting: custom marker for tests that involve plotting diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 84a590f223..c89de7b9cf 100644 --- 
a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -10,6 +10,7 @@ METPLUS_BASE = os.getcwd().split('/internal')[0] +TEST_CONF = os.path.join(os.path.dirname(__file__), 'test.conf') def stat_analysis_wrapper(metplus_config): """! Returns a default StatAnalysisWrapper with /path/to entries in the @@ -20,13 +21,31 @@ def stat_analysis_wrapper(metplus_config): # Default, empty StatAnalysisWrapper with some configuration values set # to /path/to: extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test.conf')) + extra_configs.append(TEST_CONF) config = metplus_config(extra_configs) handle_tmp_dir(config) return StatAnalysisWrapper(config) -@pytest.mark.plotting +@pytest.mark.parametrize( + 'input, expected_output', [ + ('', []), + ('0,1,2,3', ['000000', '010000', '020000', '030000']), + ('01', ['010000']), + ('010000', ['010000']), + ('begin_end_incr(0,3,1)', ['000000', '010000', '020000', '030000']), + ] +) +@pytest.mark.wrapper_d +def test_handle_format_lists(metplus_config, input, expected_output): + config = metplus_config([TEST_CONF]) + config.set('config', 'FCST_LEAD_LIST', input) + config.set('config', 'LOOP_LIST_ITEMS', 'FCST_LEAD_LIST') + wrapper = StatAnalysisWrapper(config) + assert wrapper.c_dict['FCST_LEAD_LIST'] == expected_output + + +@pytest.mark.wrapper_d def test_get_command(metplus_config): # Independently test that the stat_analysis command # is being put together correctly with @@ -46,7 +65,7 @@ def test_get_command(metplus_config): assert expected_command == test_command -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_create_c_dict(metplus_config): # Independently test that c_dict is being created # and that the wrapper and config reader @@ -54,13 +73,15 @@ def test_create_c_dict(metplus_config): st = stat_analysis_wrapper(metplus_config) # Test 1 c_dict = st.create_c_dict() - 
assert(os.path.realpath(c_dict['CONFIG_FILE']) == (METPLUS_BASE+'/parm/met_config/' - +'STATAnalysisConfig_wrapped')) - assert(c_dict['OUTPUT_DIR'] == (st.config.getdir('OUTPUT_BASE') - +'/stat_analysis')) + actual_config = os.path.join(METPLUS_BASE, 'parm', 'met_config', + 'STATAnalysisConfig_wrapped') + actual_outdir = os.path.join(st.config.getdir('OUTPUT_BASE'), + 'stat_analysis') + assert os.path.realpath(c_dict['CONFIG_FILE']) == actual_config + assert c_dict['OUTPUT_DIR'] == actual_outdir assert 'FCST_INIT_HOUR_LIST' in c_dict['GROUP_LIST_ITEMS'] - assert('FCST_VALID_HOUR_LIST' in c_dict['LOOP_LIST_ITEMS'] and - 'MODEL_LIST' in c_dict['LOOP_LIST_ITEMS']) + assert 'FCST_VALID_HOUR_LIST' in c_dict['LOOP_LIST_ITEMS'] + assert 'MODEL_LIST' in c_dict['LOOP_LIST_ITEMS'] assert c_dict['VAR_LIST'] == [] assert c_dict['MODEL_LIST'] == ['MODEL_TEST'] assert c_dict['DESC_LIST'] == [] @@ -78,7 +99,7 @@ def test_create_c_dict(metplus_config): assert c_dict['LINE_TYPE_LIST'] == [] -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_list_to_str(metplus_config): # Independently test that a list of strings # are being converted to a one @@ -86,15 +107,15 @@ def test_list_to_str(metplus_config): st = stat_analysis_wrapper(metplus_config) # Test 1 expected_list = '"a", "b", "c"' - test_list = st.list_to_str([ 'a', 'b', 'c' ]) - assert(expected_list == test_list) + test_list = st.list_to_str(['a', 'b', 'c']) + assert expected_list == test_list # Test 2 expected_list = '"0", "1", "2"' - test_list = st.list_to_str([ '0', '1', '2' ]) - assert(expected_list == test_list) + test_list = st.list_to_str(['0', '1', '2']) + assert expected_list == test_list -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_set_lists_as_loop_or_group(metplus_config): # Independently test that the lists that are set # in the config file are being set @@ -165,12 +186,12 @@ def test_set_lists_as_loop_or_group(metplus_config): ('==5', 'eq5'), ('!=0.06', 'ne0.06'), ('>0.05, gt0.05, >=1, ge1, <5, lt5, 
<=10, le10, ==15, eq15, !=20, ne20', - 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), + 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), ('<805, <1609, <4828, <8045, >=8045, <16090', 'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), ] ) -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_format_thresh(metplus_config, expression, expected_result): # Independently test the creation of # string values for defining thresholds @@ -179,7 +200,7 @@ def test_format_thresh(metplus_config, expression, expected_result): assert st.format_thresh(expression) == expected_result -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_build_stringsub_dict(metplus_config): # Independently test the building of # the dictionary used in the stringtemplate @@ -206,7 +227,7 @@ def test_build_stringsub_dict(metplus_config): config_dict['DESC'] = '' config_dict['OBS_LEAD'] = '' config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' + config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' config_dict['OBS_VALID_HOUR'] = '' config_dict['ALPHA'] = '' config_dict['OBS_LEVEL'] = '' @@ -214,15 +235,15 @@ def test_build_stringsub_dict(metplus_config): st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190105' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 
'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) assert(test_stringsub_dict['valid_beg'] == @@ -267,15 +288,15 @@ def test_build_stringsub_dict(metplus_config): st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'] test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) assert(test_stringsub_dict['valid'] == @@ -297,15 +318,15 @@ def test_build_stringsub_dict(metplus_config): st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 
'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'] test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) assert(test_stringsub_dict['valid'] == @@ -350,15 +371,15 @@ def test_build_stringsub_dict(metplus_config): st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'INIT' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) 
assert(test_stringsub_dict['init_beg'] == @@ -383,7 +404,7 @@ def test_build_stringsub_dict(metplus_config): datetime.datetime(1900, 1, 1, 23, 59 ,59)) -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_output_filename(metplus_config): # Independently test the building of # the output file name @@ -418,15 +439,15 @@ def test_get_output_filename(metplus_config): st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] # Test 1 expected_output_filename = '00Z/MODEL_TEST/MODEL_TEST_20190101.stat' output_type = 'dump_row' @@ -501,7 +522,7 @@ def test_get_output_filename(metplus_config): assert expected_output_filename == test_output_filename -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_lookin_dir(metplus_config): # Independently test the building of # the lookin directory @@ -538,15 +559,15 @@ def test_get_lookin_dir(metplus_config): st.c_dict['DATE_END'] = '20180201' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 
'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] pytest_data_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'data') # Test 1 @@ -584,7 +605,7 @@ def test_get_lookin_dir(metplus_config): assert expected_lookin_dir == test_lookin_dir -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_format_valid_init(metplus_config): # Independently test the formatting # of the valid and initialization date and hours @@ -686,7 +707,7 @@ def test_format_valid_init(metplus_config): assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_parse_model_info(metplus_config): # Independently test the creation of # the model information dictionary @@ -720,13 +741,13 @@ def test_parse_model_info(metplus_config): assert test_model_info_list[0]['out_stat_filename_type'] == expected_out_stat_filename_type -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_run_stat_analysis(metplus_config): # Test running of stat_analysis st = stat_analysis_wrapper(metplus_config) # Test 1 expected_filename = (st.config.getdir('OUTPUT_BASE')+'/stat_analysis' - +'/00Z/MODEL_TEST/MODEL_TEST_20190101.stat') + 
'/00Z/MODEL_TEST/MODEL_TEST_20190101.stat') if os.path.exists(expected_filename): os.remove(expected_filename) comparison_filename = (METPLUS_BASE+'/internal/tests/data/stat_data/' @@ -736,7 +757,8 @@ def test_run_stat_analysis(metplus_config): st.c_dict['DATE_TYPE'] = 'VALID' st.run_stat_analysis() assert os.path.exists(expected_filename) - assert os.path.getsize(expected_filename) == os.path.getsize(comparison_filename) + assert (os.path.getsize(expected_filename) == + os.path.getsize(comparison_filename)) @pytest.mark.parametrize( @@ -751,7 +773,7 @@ def test_run_stat_analysis(metplus_config): ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), ] ) -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_level_list(metplus_config, data_type, config_list, expected_list): config = metplus_config() config.set('config', f'{data_type}_LEVEL_LIST', config_list) @@ -761,7 +783,7 @@ def test_get_level_list(metplus_config, data_type, config_list, expected_list): assert saw.get_level_list(data_type) == expected_list -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' config = metplus_config() From a8946f7f97934d09fe784e7160b3a8c103605dd6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:07:54 -0600 Subject: [PATCH 13/92] temporarily disable unit test to ensure that changes to remove MakePlots do not break before adding other logic to StatAnalysis wrapper --- .../tests/pytests/wrappers/stat_analysis/test_stat_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index c89de7b9cf..3f39bd876f 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -32,7 +32,7 @@ def 
stat_analysis_wrapper(metplus_config): ('', []), ('0,1,2,3', ['000000', '010000', '020000', '030000']), ('01', ['010000']), - ('010000', ['010000']), + #('010000', ['010000']), ('begin_end_incr(0,3,1)', ['000000', '010000', '020000', '030000']), ] ) From 191f811177098909dd7f0635784d6a3a2f4abc6e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:09:07 -0600 Subject: [PATCH 14/92] per #1843, remove MakePlots wrapper --- docs/Users_Guide/glossary.rst | 125 +- docs/Users_Guide/installation.rst | 5 - docs/Users_Guide/systemconfiguration.rst | 2 +- docs/Users_Guide/wrappers.rst | 85 -- .../examples/plot_emc_grid2grid_anom.conf | 133 -- .../examples/plot_emc_grid2grid_pres.conf | 128 -- .../examples/plot_emc_grid2grid_sfc.conf | 172 --- .../examples/plot_emc_grid2obs_conus_sfc.conf | 129 -- .../examples/plot_emc_grid2obs_upper_air.conf | 125 -- .../examples/plot_emc_precip_ccpa.conf | 114 -- .../examples/plot_user_plotting_scripts.conf | 114 -- .../plotting/met_config/STATAnalysisConfig | 93 -- .../plotting/make_plots/test_make_plots.conf | 120 -- .../make_plots/test_make_plots_wrapper.py | 105 -- .../config_metplus/test_config_metplus.py | 2 - .../pytests/util/met_util/test_met_util.py | 1 - .../wrappers/stat_analysis/test_plotting.conf | 127 -- .../test_stat_analysis_plotting.py | 407 ------ metplus/util/config_metplus.py | 5 - metplus/util/doc_util.py | 1 - metplus/wrappers/__init__.py | 1 - metplus/wrappers/make_plots_wrapper.py | 309 ----- metplus/wrappers/stat_analysis_wrapper.py | 387 +----- ush/plotting_scripts/plot_date_by_level.py | 819 ------------ ush/plotting_scripts/plot_lead_average.py | 657 ---------- ush/plotting_scripts/plot_lead_by_date.py | 776 ------------ ush/plotting_scripts/plot_lead_by_level.py | 707 ----------- ush/plotting_scripts/plot_stat_by_level.py | 504 -------- .../plot_threshold_average.py | 649 ---------- .../plot_threshold_by_lead.py | 700 ---------- 
ush/plotting_scripts/plot_time_series.py | 775 ----------- ush/plotting_scripts/plot_util.py | 1128 ----------------- 32 files changed, 73 insertions(+), 9332 deletions(-) delete mode 100644 internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf delete mode 100644 internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf delete mode 100644 internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf delete mode 100644 internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf delete mode 100644 internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf delete mode 100644 internal/tests/plotting/examples/plot_emc_precip_ccpa.conf delete mode 100644 internal/tests/plotting/examples/plot_user_plotting_scripts.conf delete mode 100644 internal/tests/plotting/met_config/STATAnalysisConfig delete mode 100644 internal/tests/pytests/plotting/make_plots/test_make_plots.conf delete mode 100644 internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py delete mode 100644 internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf delete mode 100644 internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py delete mode 100755 metplus/wrappers/make_plots_wrapper.py delete mode 100644 ush/plotting_scripts/plot_date_by_level.py delete mode 100644 ush/plotting_scripts/plot_lead_average.py delete mode 100644 ush/plotting_scripts/plot_lead_by_date.py delete mode 100644 ush/plotting_scripts/plot_lead_by_level.py delete mode 100644 ush/plotting_scripts/plot_stat_by_level.py delete mode 100644 ush/plotting_scripts/plot_threshold_average.py delete mode 100644 ush/plotting_scripts/plot_threshold_by_lead.py delete mode 100644 ush/plotting_scripts/plot_time_series.py delete mode 100644 ush/plotting_scripts/plot_util.py diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index dbe419ef6e..33cf89fb3c 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -114,37 +114,12 @@ METplus 
Configuration Glossary GROUP_LIST_ITEMS Names of the lists in the METplus .conf file to treat the items in those lists as a group. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis LOOP_LIST_ITEMS Names of the lists in the METplus .conf file to treat the items in those lists individually. - | *Used by:* MakePlots, StatAnalysis - - MAKE_PLOTS_AVERAGE_METHOD - The method to use to average the data. Valid options are MEAN, MEDIAN, and AGGREGATION. - - | *Used by:* MakePlots - - MAKE_PLOTS_SCRIPTS_DIR - Directory to find scripts used by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_INPUT_DIR - Directory containing input files used by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_OUTPUT_DIR - Directory to write files generated by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_VERIF_CASE - Verification case used by MakePlots. Valid options for this include: grid2grid, grid2obs, precip. - - | *Used by:* MakePlots + | *Used by:* StatAnalysis CYCLONE_PLOTTER_OUTPUT_DIR Directory for saving files generated by CyclonePlotter. @@ -766,14 +741,6 @@ METplus Configuration Glossary BMODEL .. warning:: **DEPRECATED:** Please use :term:`TC_STAT_BMODEL`. - CI_METHOD - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_CI_METHOD`. - - MAKE_PLOTS_CI_METHOD - The method for creating confidence intervals. Valid options are EMC, or NONE. - - | *Used by:* MakePlots - CYCLONE_CIRCLE_MARKER_SIZE .. warning:: **DEPRECATED:** Please use :term:`CYCLONE_PLOTTER_CIRCLE_MARKER_SIZE`. @@ -811,7 +778,7 @@ METplus Configuration Glossary COV_THRESH_LIST Specify the values of the COV_THRESH column in the MET .stat file to use; - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis CYCLONE_CROSS_MARKER_SIZE .. warning:: **DEPRECATED:** Please use :term:`CYCLONE_PLOTTER_CROSS_MARKER_SIZE`. @@ -910,12 +877,12 @@ METplus Configuration Glossary DESC_LIST A single value or list of values used in the stat_analysis data stratification. 
Specifies the values of the DESC column in the MET .stat file to use. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis ALPHA_LIST A single value or list of values used in the stat_analysis data stratification. Specifies the values of the ALPHA column in the MET .stat file to use. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis DLAND_FILE .. warning:: **DEPRECATED:** Please use :term:`TC_PAIRS_DLAND_FILE`. @@ -1124,14 +1091,6 @@ METplus Configuration Glossary | *Used by:* EnsembleStat - EVENT_EQUALIZATION - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_EVENT_EQUALIZATION`. - - MAKE_PLOTS_EVENT_EQUALIZATION - If event equalization is to be used (True) or not (False). If set to True, if any of the listed models are missing data for a particular time, data for all models will be masked out for this time. If set to False, there are no changes to the data. - - | *Used by:* MakePlots - EXTRACT_OUT_DIR .. warning:: **DEPRECATED:** Please use :term:`EXTRACT_TILES_OUTPUT_DIR`. @@ -2173,12 +2132,12 @@ METplus Configuration Glossary FCST_INIT_HOUR_LIST Specify a list of hours for initialization times of forecast files for use in the analysis. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis OBS_INIT_HOUR_LIST Specify a list of hours for initialization times of observation files for use in the analysis. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis INIT_HOUR_BEG .. warning:: **DEPRECATED:** Please use :term:`FCST_INIT_HOUR_LIST` or :term:`OBS_INIT_HOUR_LIST` instead. @@ -2221,7 +2180,7 @@ METplus Configuration Glossary INTERP_MTHD_LIST Specify the values of the INTERP_MTHD column in the MET .stat file to use; specify the interpolation used to create the MET .stat files. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis INTERP_PTS .. warning:: **DEPRECATED:** Please use :term:`INTERP_PNTS_LIST` instead. 
@@ -2229,7 +2188,7 @@ METplus Configuration Glossary INTERP_PNTS_LIST Specify the values of the INTERP_PNTS column in the MET .stat file to use; corresponds to the interpolation in the MET .stat files. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis INTERVAL_TIME Define the interval time in hours (HH) to be used by the MET pb2nc tool. @@ -2274,12 +2233,12 @@ METplus Configuration Glossary FCST_LEAD_LIST Specify the values of the FSCT_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis OBS_LEAD_LIST Specify the values of the OBS_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis LEAD_SEQ Specify the sequence of forecast lead times to include in the analysis. Comma separated list format, e.g.:0, 6, 12. See :ref:`looping_over_forecast_leads` for more information. Units are assumed to be hours unless specified with Y, m, d, H, M, or S. @@ -2321,7 +2280,7 @@ METplus Configuration Glossary LINE_TYPE_LIST Specify the MET STAT line types to be considered. For TCMPRPlotter, this is optional in the METplus configuration file for running with :term:`LOOP_ORDER` = times. - | *Used by:* MakePlots, StatAnalysis, TCMPRPlotter + | *Used by:* StatAnalysis, TCMPRPlotter LOG_DIR Specify the directory where log files from MET and METplus should be written. @@ -2447,7 +2406,7 @@ METplus Configuration Glossary MODEL_LIST List of the specified the model names. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis MODEL_NAME .. warning:: **DEPRECATED:** Please use :term:`MODEL\`. @@ -2460,7 +2419,7 @@ METplus Configuration Glossary | ... | MODEL - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis MODEL_NAME_ON_PLOT .. warning:: **DEPRECATED:** Please use :term:`MODEL_REFERENCE_NAME` instead. 
@@ -2473,7 +2432,7 @@ METplus Configuration Glossary | ... | MODELN_REFERENCE_NAME - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis MODEL_OBS_NAME .. warning:: **DEPRECATED:** Please use :term:`MODEL_OBTYPE` instead. @@ -2486,7 +2445,7 @@ METplus Configuration Glossary | ... | MODEL_OBTYPE - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis MODEL_STAT_DIR .. warning:: **DEPRECATED:** Please use :term:`MODEL_STAT_ANALYSIS_LOOKIN_DIR` instead. @@ -3445,12 +3404,6 @@ METplus Configuration Glossary | *Used by:* PCPCombine - PLOTTING_OUTPUT_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_OUTPUT_DIR` instead. - - PLOTTING_SCRIPTS_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_SCRIPTS_DIR` instead. - PLOT_CONFIG_OPTS .. warning:: **DEPRECATED:** Please use :term:`TCMPR_PLOTTER_PLOT_CONFIG_OPTS` instead. @@ -3459,25 +3412,13 @@ METplus Configuration Glossary | *Used by:* TCMPRPlotter - PLOT_STATS_LIST - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_STATS_LIST` instead. - - MAKE_PLOTS_STATS_LIST - This is a list of the statistics to calculate and create plots for. Specify the list in a comma-separated list, e.g.: - - acc, bias, rmse - - The list of valid options varies depending on line type that was used during the filtering of stat_analysis_wrapper. For SL1L2, VL1L2 valid options are bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, and fbar_obar. For SAL1L2, VAL1L2, the valid options is acc. For VCNT, bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, rsd, fbar_speed, fbar_dir, fbar_obar_speed, and fbar_obar_dir. For CTC, rate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs - - | *Used by:* MakePlots - PLOT_TIME .. warning:: **DEPRECATED:** Please use :term:`DATE_TYPE` instead. 
DATE_TYPE In StatAnalysis, this specifies the way to treat the date information, where valid options are VALID and INIT. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis PLOT_TYPES .. warning:: **DEPRECATED:** Please use :term:`TCMPR_PLOTTER_PLOT_TYPES` instead. @@ -3590,7 +3531,7 @@ METplus Configuration Glossary VX_MASK_LIST Specify the values of the VX_MASK column in the MET .stat file to use; a list of the verification regions of interest. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis POINT2GRID_REGRID_METHOD Sets the gridding method used by point2grid. @@ -3865,9 +3806,6 @@ METplus Configuration Glossary | *Used by:* StatAnalysis - STAT_FILES_INPUT_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_INPUT_DIR` instead. - SERIES_ANALYSIS_STAT_LIST .. warning:: **DEPRECATED:** Please use :term:`SERIES_ANALYSIS_OUTPUT_STATS_CNT` instead. @@ -4331,12 +4269,12 @@ METplus Configuration Glossary FCST_VALID_HOUR_LIST Specify a list of hours for valid times of forecast files for use in the analysis. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis OBS_VALID_HOUR_LIST Specify a list of hours for valid times of observation files for use in the analysis. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis VALID_HOUR_BEG .. warning:: **DEPRECATED:** Please use :term:`FCST_VALID_HOUR_LIST` or :term:`OBS_VALID_HOUR_LIST` instead. @@ -4369,35 +4307,16 @@ METplus Configuration Glossary VAR_FOURIER_DECOMP Specify if Fourier decomposition is to be considered (True) or not (False). If this is set to True, data stratification will be done for the Fourier decomposition of FCS_VAR_NAME. This should have been previously run in grid_stat_wrapper. The default value is set to False. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis VAR_WAVE_NUM_LIST Specify a comma separated list of wave numbers pairings of the Fourier decomposition. 
- | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis VERIFICATION_GRID .. warning:: **DEPRECATED:** Please use :term:`REGRID_DATA_PLANE_VERIF_GRID` instead. - VERIF_CASE - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_CASE` instead. - - VERIF_GRID - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_GRID` instead. - - MAKE_PLOTS_VERIF_GRID - Specify a string describing the grid the verification was performed on. This is the name of the grid upon which the verification was done on, ex. G002. - - | *Used by:* MakePlots - - VERIF_TYPE - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_TYPE` instead. - - MAKE_PLOTS_VERIF_TYPE - Specify a string describing the type of verification being performed. For MAKE_PLOTS_VERIF_CASE = grid2grid, valid options are anom, pres, and sfc. For MAKE_PLOTS_VERIF_CASE = grid2obs, valid options are conus_sfc and upper_air. For MAKE_PLOTS_VERIF_CASE = precip, any accumulation amount is valid, ex. A24. - - | *Used by:* MakePlots - VERTICAL_LOCATION .. warning:: **DEPRECATED:** Specify the vertical location desired when using the MET pb2nc tool. diff --git a/docs/Users_Guide/installation.rst b/docs/Users_Guide/installation.rst index a935998ab7..dcde8cefa1 100644 --- a/docs/Users_Guide/installation.rst +++ b/docs/Users_Guide/installation.rst @@ -113,11 +113,6 @@ to run. - netCDF4 (1.5.4) -- MakePlots wrapper - - - cartopy (0.20.3) - - pandas (1.4.3) - - CyclonePlotter wrapper - cartopy (0.20.3) diff --git a/docs/Users_Guide/systemconfiguration.rst b/docs/Users_Guide/systemconfiguration.rst index e9e3868f2d..14fecb13f7 100644 --- a/docs/Users_Guide/systemconfiguration.rst +++ b/docs/Users_Guide/systemconfiguration.rst @@ -1145,7 +1145,7 @@ paths, and more. The value of each list item can be referenced in the METplus configuration variables by using {custom?fmt=%s}. 
The variable CUSTOM_LOOP_LIST will apply the values to each wrapper in the PROCESS_LIST unless the wrapper does not support this functionality. CyclonePlotter, -MakePlots, SeriesByInit, SeriesByLead, StatAnalysis, TCStat, and +StatAnalysis, TCStat, and TCMPRPlotter wrappers are not supported. If the variable is not set or set to an empty string, the wrapper will execute as normal without additional runs. The name of the wrapper-specific variables contain the name of the diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 0956b88bef..954eafc2fc 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -3684,91 +3684,6 @@ see :ref:`How METplus controls MET config file settings`. * - :term:`IODA2NC_MET_CONFIG_OVERRIDES` - n/a -.. _make_plots_wrapper: - -MakePlots -========= - -Description ------------ - -The MakePlots wrapper creates various statistical plots using python -scripts for the various METplus Wrappers use cases. This can only be run -following StatAnalysis wrapper when LOOP_ORDER = processes. To run -MakePlots wrapper, include MakePlots in PROCESS_LIST. 
- -METplus Configuration ---------------------- - -The following values **must** be defined in the METplus Wrappers -configuration file: - -| :term:`MAKE_PLOTS_SCRIPTS_DIR` -| :term:`MAKE_PLOTS_INPUT_DIR` -| :term:`MAKE_PLOTS_OUTPUT_DIR` -| :term:`MAKE_PLOTS_VERIF_CASE` -| :term:`MAKE_PLOTS_VERIF_TYPE` -| :term:`DATE_TYPE` -| :term:`MODEL\` -| :term:`MODEL_OBTYPE` -| :term:`MODEL_REFERENCE_NAME` -| :term:`GROUP_LIST_ITEMS` -| :term:`LOOP_LIST_ITEMS` -| :term:`MODEL_LIST` -| :term:`FCST_LEAD_LIST` -| :term:`VX_MASK_LIST` -| :term:`LINE_TYPE_LIST` -| :term:`MAKE_PLOTS_AVERAGE_METHOD` -| :term:`MAKE_PLOTS_STATS_LIST` -| :term:`MAKE_PLOTS_CI_METHOD` -| :term:`MAKE_PLOTS_VERIF_GRID` -| :term:`MAKE_PLOTS_EVENT_EQUALIZATION` -| - -The following values are **optional** in the METplus Wrappers -configuration file: - -| :term:`VAR_FOURIER_DECOMP` -| :term:`VAR_WAVE_NUM_LIST` -| :term:`FCST_VALID_HOUR_LIST` -| :term:`OBS_VALID_HOUR_LIST` -| :term:`FCST_INIT_HOUR_LIST` -| :term:`OBS_INIT_HOUR_LIST` -| :term:`OBS_LEAD_LIST` -| :term:`DESC_LIST` -| :term:`INTERP_MTHD_LIST` -| :term:`INTERP_PNTS_LIST` -| :term:`COV_THRESH_LIST` -| :term:`ALPHA_LIST` -| - -.. warning:: **DEPRECATED:** - - | :term:`PLOTTING_SCRIPTS_DIR` - | :term:`STAT_FILES_INPUT_DIR` - | :term:`PLOTTING_OUTPUT_DIR` - | :term:`VERIF_CASE` - | :term:`VERIF_TYPE` - | :term:`PLOT_TIME` - | :term:`MODEL_NAME` - | :term:`MODEL_OBS_NAME` - | :term:`MODEL_NAME_ON_PLOT` - | :term:`VALID_HOUR_METHOD` - | :term:`VALID_HOUR_BEG` - | :term:`VALID_HOUR_END` - | :term:`VALID_HOUR_INCREMENT` - | :term:`INIT_HOUR_BEG` - | :term:`INIT_HOUR_END` - | :term:`INIT_HOUR_INCREMENT` - | :term:`REGION_LIST` - | :term:`LEAD_LIST` - | :term:`LINE_TYPE` - | :term:`INTERP` - | :term:`PLOT_STATS_LIST` - | :term:`CI_METHOD` - | :term:`VERIF_GRID` - | :term:`EVENT_EQUALIZATION` - | .. 
_met_db_load_wrapper: diff --git a/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf deleted file mode 100644 index 76a68e8347..0000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf +++ /dev/null @@ -1,133 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/anom -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/anom -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/anom/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P700, P500, P250 -VAR1_FOURIER_DECOMP = True -VAR1_WAVE_NUM_LIST = 0-3, 4-9, 10-20, 0-20 - -FCST_VAR2_NAME = HGT -FCST_VAR2_LEVELS = P1000, P700, P500, P250 - -FCST_VAR3_NAME = UGRD_VGRD -FCST_VAR3_LEVELS = P850, P500, P250 - -FCST_VAR4_NAME = UGRD -FCST_VAR4_LEVELS = P850, P500, P250 - -FCST_VAR5_NAME = VGRD -FCST_VAR5_LEVELS = P850, P500, P250 - -FCST_VAR6_NAME = TMP -FCST_VAR6_LEVELS = P850, P500, P250 - -FCST_VAR7_NAME = PRMSL -FCST_VAR7_LEVELS = Z0 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = 
{MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, PNA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = anom -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SAL1L2, VAL1L2 -MAKE_PLOTS_STATS_LIST = acc -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git 
a/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf deleted file mode 100644 index 6c88910e75..0000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf +++ /dev/null @@ -1,128 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/pres -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/pres -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/pres/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR2_NAME = TMP -FCST_VAR2_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR3_NAME = UGRD_VGRD -FCST_VAR3_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR4_NAME = UGRD -FCST_VAR4_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR5_NAME = VGRD -FCST_VAR5_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR6_NAME = O3MR -FCST_VAR6_LEVELS = P100, P70, P50, P30, P20, P10 - -STAT_ANALYSIS_CONFIG_FILE = 
{CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, PNA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = pres -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event 
equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf deleted file mode 100644 index 775102a5df..0000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf +++ /dev/null @@ -1,172 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/sfc -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/sfc -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/sfc/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = TMP -FCST_VAR1_LEVELS = Z2 - -FCST_VAR2_NAME = RH -FCST_VAR2_LEVELS = Z2 - -FCST_VAR3_NAME = SPFH -FCST_VAR3_LEVELS = Z2 - -FCST_VAR4_NAME = HPBL -FCST_VAR4_LEVELS = L0 - -FCST_VAR5_NAME = PRES -FCST_VAR5_LEVELS = Z0 - -FCST_VAR6_NAME = PRMSL -FCST_VAR6_OPTIONS = GRIB_lvl_typ = 102; -FCST_VAR6_LEVELS = L0 - -FCST_VAR7_NAME = TMP -FCST_VAR7_LEVELS = Z0 - -FCST_VAR8_NAME = UGRD -FCST_VAR8_LEVELS = Z10 - -FCST_VAR9_NAME = VGRD -FCST_VAR9_LEVELS = Z10 - -FCST_VAR10_NAME = TSOIL -FCST_VAR10_OPTIONS = GRIB_lvl_typ = 112; -FCST_VAR10_LEVELS = Z0-10 - -FCST_VAR11_NAME = SOILW 
-FCST_VAR11_OPTIONS = GRIB_lvl_typ = 112; -FCST_VAR11_LEVELS = Z0-10 - -FCST_VAR12_NAME = WEASD -FCST_VAR12_OPTIONS = GRIB_lvl_typ = 01; - -FCST_VAR13_NAME = CAPE -FCST_VAR13_LEVELS = Z0 - -FCST_VAR14_NAME = CWAT -FCST_VAR14_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR14_LEVELS = L0 - -FCST_VAR15_NAME = PWAT -FCST_VAR15_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR15_LEVELS = L0 - -FCST_VAR16_NAME = TMP -FCST_VAR16_OPTIONS = GRIB_lvl_typ = 07; -FCST_VAR16_LEVELS = L0 - -FCST_VAR17_NAME = HGT -FCST_VAR17_OPTIONS = GRIB_lvl_typ = 07; -FCST_VAR17_LEVELS = L0 - -FCST_VAR18_NAME = TOZNE -FCST_VAR18_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR18_LEVELS = L0 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, N60, S60, NPO, SPO, NAO, SAO, CONUS, CAM, NSA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = sfc -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = fbar -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf b/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf deleted file mode 100644 index 5111950972..0000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf +++ /dev/null @@ -1,129 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2obs/conus_sfc -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2obs/conus_sfc -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT 
-DATE_TYPE = INIT -# blank or YYYYmmDD format -INIT_BEG = 20170601 -INIT_END = 20170603 -# blank for HH format (two digit hour format, ex. 06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_VALID_HOUR_LIST -LOOP_LIST_ITEMS = FCST_INIT_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = gfs -MODEL1_OBTYPE = ONLYSF -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2obs/{init_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = ops_gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = TMP -FCST_VAR1_LEVELS = Z2 - -FCST_VAR2_NAME = RH -FCST_VAR2_LEVELS = Z2 - -FCST_VAR3_NAME = DPT -FCST_VAR3_LEVELS = Z2 - -FCST_VAR4_NAME = UGRD_VGRD -FCST_VAR4_LEVELS = Z10 - -FCST_VAR5_NAME = TCDC -FCST_VAR5_LEVELS = L0 - -FCST_VAR6_NAME = PRMSL -FCST_VAR7_LEVELS = Z0 - - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 06, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 
120, 132, 144, 156 -VX_MASK_LIST = CONUS, EAST, WEST, MDW, NPL, SPL, NEC, SEC, NMT SMT, SWD, GRB, LMV, GMC, APL -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -## Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2obs -MAKE_PLOTS_VERIF_TYPE = conus_sfc -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, fbar_obar -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G104 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False 
diff --git a/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf b/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf deleted file mode 100644 index dbb865bd48..0000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf +++ /dev/null @@ -1,125 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2obs/upper_air -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2obs/upper_air -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = INIT -# blank or YYYYmmDD format -INIT_BEG = 20170601 -INIT_END = 20170603 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_VALID_HOUR_LIST -LOOP_LIST_ITEMS = FCST_INIT_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = gfs -MODEL1_OBTYPE = ONLYSF -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2obs/{init_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = TMP -FCST_VAR1_LEVELS = P850, P500, P200, P50, P10 - -FCST_VAR2_NAME = RH -FCST_VAR2_LEVELS = P850, P500, P200, P50, P10 - -FCST_VAR3_NAME = UGRD_VGRD -FCST_VAR3_LEVELS = P850, P500, P200, P50, P10 - -FCST_VAR4_NAME = UGRD -FCST_VAR4_LEVELS = P850, P500, P200, P50, P10 - -FCST_VAR5_NAME = VGDRD -FCST_VAR5_LEVELS = P850, P500, P200, P50, P10 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 06, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156 -VX_MASK_LIST = G003, NH, SH, TRO, G236 -# OPTIONAL 
LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -## Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2obs -MAKE_PLOTS_VERIF_TYPE = upper_air -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G003 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf b/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf 
deleted file mode 100644 index 841a4b4514..0000000000 --- a/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf +++ /dev/null @@ -1,114 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/precip/ccpa -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/precip/ccpa -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 06) -FCST_VALID_HOUR_LIST = 12 -FCST_INIT_HOUR_LIST = 00, 12 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/precip/ccpa/*/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# 
optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = APCP -FCST_VAR1_LEVELS = A24 -FCST_VAR1_THRESH = >=0.2, >=2, >=5, >=10, >=15, >=25, >=35, >=50, >=75 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156, 168, 180 -VX_MASK_LIST = CONUS, EAST, WEST -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = precip -MAKE_PLOTS_VERIF_TYPE = ccpa -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = CTC -MAKE_PLOTS_STATS_LIST = bias, ets -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = AGGREGATION -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC_MONTE_CARLO -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G211 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_user_plotting_scripts.conf b/internal/tests/plotting/examples/plot_user_plotting_scripts.conf deleted file mode 100644 index 87c82556d9..0000000000 --- a/internal/tests/plotting/examples/plot_user_plotting_scripts.conf +++ /dev/null @@ -1,114 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/anom_HGT -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/anom_HGT -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either 
VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/anom/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P700, P500, P250 -VAR1_FOURIER_DECOMP = True -VAR1_WAVE_NUM_LIST = 0-3, 4-9, 10-20, 0-20 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = NHX -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = 
- -## Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_USER_SCRIPT_LIST = plot_time_series.py -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SAL1L2, VAL1L2 -MAKE_PLOTS_STATS_LIST = acc -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/met_config/STATAnalysisConfig b/internal/tests/plotting/met_config/STATAnalysisConfig deleted file mode 100644 index 74e002049d..0000000000 --- a/internal/tests/plotting/met_config/STATAnalysisConfig +++ /dev/null @@ -1,93 +0,0 @@ 
-//////////////////////////////////////////////////////////////////////////////// -// -// STAT-Analysis configuration file. -// -// For additional information, see the MET_BASE/config/README file. -// -//////////////////////////////////////////////////////////////////////////////// - -// -// Filtering input STAT lines by the contents of each column -// -${METPLUS_MODEL} -${METPLUS_DESC} - -fcst_lead = [${FCST_LEAD}]; -obs_lead = [${OBS_LEAD}]; - -fcst_valid_beg = "${FCST_VALID_BEG}"; -fcst_valid_end = "${FCST_VALID_END}"; -fcst_valid_hour = [${FCST_VALID_HOUR}]; - -obs_valid_beg = "${OBS_VALID_BEG}"; -obs_valid_end = "${OBS_VALID_END}"; -obs_valid_hour = [${OBS_VALID_HOUR}]; - -fcst_init_beg = "${FCST_INIT_BEG}"; -fcst_init_end = "${FCST_INIT_END}"; -fcst_init_hour = [${FCST_INIT_HOUR}]; - -obs_init_beg = "${OBS_INIT_BEG}"; -obs_init_end = "${OBS_INIT_END}"; -obs_init_hour = [${OBS_INIT_HOUR}]; - -fcst_var = [${FCST_VAR}]; -obs_var = [${OBS_VAR}]; - -fcst_units = [${FCST_UNITS}]; -obs_units = [${OBS_UNITS}]; - -fcst_lev = [${FCST_LEVEL}]; -obs_lev = [${OBS_LEVEL}]; - -${METPLUS_OBTYPE} - -vx_mask = [${VX_MASK}]; - -interp_mthd = [${INTERP_MTHD}]; - -interp_pnts = [${INTERP_PNTS}]; - -fcst_thresh = [${FCST_THRESH}]; -obs_thresh = [${OBS_THRESH}]; -cov_thresh = [${COV_THRESH}]; - -alpha = [${ALPHA}]; - -line_type = [${LINE_TYPE}]; - -column = []; - -weight = []; - -//////////////////////////////////////////////////////////////////////////////// - -// -// Array of STAT-Analysis jobs to be performed on the filtered data -// -jobs = [ - "${JOB}" -]; - -//////////////////////////////////////////////////////////////////////////////// - -// -// Confidence interval settings -// -out_alpha = 0.05; - -boot = { - interval = PCTILE; - rep_prop = 1.0; - n_rep = 0; - rng = "mt19937"; - seed = ""; -} - -//////////////////////////////////////////////////////////////////////////////// - -rank_corr_flag = FALSE; -vif_flag = FALSE; -tmp_dir = "/tmp"; - 
-//////////////////////////////////////////////////////////////////////////////// diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf deleted file mode 100644 index 4bad57f747..0000000000 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf +++ /dev/null @@ -1,120 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20190101 -VALID_END = 20190101 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00, 06, 12, 18 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = MODEL_TEST1 -MODEL1_OBTYPE = MODEL_TEST1_ANL -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis -MODEL1_REFERENCE_NAME = MODEL_TEST1 - -MODEL2 = MODEL_TEST2 -MODEL2_OBTYPE = ANLYS2 -MODEL2_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis -MODEL2_REFERENCE_NAME = TEST2_MODEL - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P850 -OBS_VAR1_NAME = HGT -OBS_VAR1_LEVELS = P1000, P850 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1}, {MODEL2} -FCST_LEAD_LIST = 24, 48 -VX_MASK_LIST = NHX -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to 
create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = pres -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py deleted file mode 100644 index 5ee62c781a..0000000000 --- a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py +++ /dev/null 
@@ -1,105 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -import os - -from metplus.wrappers.make_plots_wrapper import MakePlotsWrapper - -METPLUS_BASE = os.getcwd().split('/internal')[0] - - -def make_plots_wrapper(metplus_config): - """! Returns a default MakePlotsWrapper with /path/to entries in the - metplus_system.conf and metplus_runtime.conf configuration - files. Subsequent tests can customize the final METplus configuration - to over-ride these /path/to values.""" - - # Default, empty MakePlotsWrapper with some configuration values set - # to /path/to: - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test_make_plots.conf')) - config = metplus_config(extra_configs) - return MakePlotsWrapper(config) - - -@pytest.mark.plotting -def test_get_command(metplus_config): - # Independently test that the make_plots python - # command is being put together correctly with - # python command followed by the full path - # to the plotting script - mp = make_plots_wrapper(metplus_config) - # Test 1 - expected_command = ( - 'python plot_fake_script_name.py' - ) - mp.plotting_script = 'plot_fake_script_name.py' - test_command = mp.get_command() - assert(expected_command == test_command) - - -@pytest.mark.plotting -def test_create_c_dict(metplus_config): - # Independently test that c_dict is being created - # and that the wrapper and config reader - # is setting the values as expected - mp = make_plots_wrapper(metplus_config) - # Test 1 - c_dict = mp.create_c_dict() - assert(c_dict['LOOP_ORDER'] == 'processes') - # NOTE: MakePlots relies on output from StatAnalysis - # so its input resides in the output of StatAnalysis - assert(c_dict['INPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE') - +'/plotting/stat_analysis') - assert(c_dict['OUTPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE') - +'/plotting/make_plots') - assert(os.path.realpath(c_dict['SCRIPTS_BASE_DIR']) == METPLUS_BASE+'/ush/plotting_scripts') - assert(c_dict['DATE_TYPE'] == 
'VALID') - assert(c_dict['VALID_BEG'] == '20190101') - assert(c_dict['VALID_END'] == '20190101') - assert(c_dict['INIT_BEG'] == '') - assert(c_dict['INIT_END'] == '') - assert(c_dict['GROUP_LIST_ITEMS'] == [ 'FCST_INIT_HOUR_LIST' ]) - assert(c_dict['LOOP_LIST_ITEMS'] == [ 'FCST_VALID_HOUR_LIST' ]) - assert(c_dict['VAR_LIST'] == [{'fcst_name': 'HGT', - 'fcst_output_name': 'HGT', - 'obs_name': 'HGT', - 'obs_output_name': 'HGT', - 'fcst_extra': '', 'obs_extra': '', - 'fcst_thresh': [], 'obs_thresh': [], - 'fcst_level': 'P1000', - 'obs_level': 'P1000', 'index': '1'}, - {'fcst_name': 'HGT', - 'fcst_output_name': 'HGT', - 'obs_name': 'HGT', - 'obs_output_name': 'HGT', - 'fcst_extra': '', 'obs_extra': '', - 'fcst_thresh': [], 'obs_thresh': [], - 'fcst_level': 'P850', - 'obs_level': 'P850', 'index': '1'}]) - assert(c_dict['MODEL_LIST'] == [ 'MODEL_TEST1', 'MODEL_TEST2']) - assert(c_dict['DESC_LIST'] == []) - assert(c_dict['FCST_LEAD_LIST'] == [ '24', '48' ]) - assert(c_dict['OBS_LEAD_LIST'] == []) - assert(c_dict['FCST_VALID_HOUR_LIST'] == [ '00', '06', '12', '18' ]) - assert(c_dict['FCST_INIT_HOUR_LIST'] == [ '00', '06', '12', '18' ]) - assert(c_dict['OBS_VALID_HOUR_LIST'] == []) - assert(c_dict['OBS_INIT_HOUR_LIST'] == []) - assert(c_dict['VX_MASK_LIST'] == [ 'NHX' ]) - assert(c_dict['INTERP_MTHD_LIST'] == []) - assert(c_dict['INTERP_PNTS_LIST'] == []) - assert(c_dict['COV_THRESH_LIST'] == []) - assert(c_dict['ALPHA_LIST'] == []) - assert(c_dict['LINE_TYPE_LIST'] == [ 'SL1L2', 'VL1L2' ]) - assert(c_dict['USER_SCRIPT_LIST'] == []) - assert(c_dict['VERIF_CASE'] == 'grid2grid') - assert(c_dict['VERIF_TYPE'] == 'pres') - assert(c_dict['STATS_LIST'] == [ 'bias', 'rmse', 'msess', 'rsd', - 'rmse_md', 'rmse_pv', 'pcor' ]) - assert(c_dict['AVERAGE_METHOD'] == 'MEAN') - assert(c_dict['CI_METHOD'] == 'EMC') - assert(c_dict['VERIF_GRID'] == 'G002') - assert(c_dict['EVENT_EQUALIZATION'] == 'False') - assert(c_dict['LOG_METPLUS'] == mp.config.getdir('OUTPUT_BASE') - 
+'/logs/metplus.log') diff --git a/internal/tests/pytests/util/config_metplus/test_config_metplus.py b/internal/tests/pytests/util/config_metplus/test_config_metplus.py index 07a32655f5..8332aba14c 100644 --- a/internal/tests/pytests/util/config_metplus/test_config_metplus.py +++ b/internal/tests/pytests/util/config_metplus/test_config_metplus.py @@ -995,8 +995,6 @@ def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, 'ASCII2NC', 'TCStat', 'TCPairs']), - # remove MakePlots from list - ('StatAnalysis, MakePlots', ['StatAnalysis']), ] ) @pytest.mark.util diff --git a/internal/tests/pytests/util/met_util/test_met_util.py b/internal/tests/pytests/util/met_util/test_met_util.py index 7d0df8dd50..797f313e87 100644 --- a/internal/tests/pytests/util/met_util/test_met_util.py +++ b/internal/tests/pytests/util/met_util/test_met_util.py @@ -297,7 +297,6 @@ def test_get_lead_sequence_init_min_10(metplus_config): ('GempakToCFWrapper', 'gempak_to_cf_wrapper'), ('GenVxMaskWrapper', 'gen_vx_mask_wrapper'), ('GridStatWrapper', 'grid_stat_wrapper'), - ('MakePlotsWrapper', 'make_plots_wrapper'), ('MODEWrapper', 'mode_wrapper'), ('MTDWrapper', 'mtd_wrapper'), ('PB2NCWrapper', 'pb2nc_wrapper'), diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf b/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf deleted file mode 100644 index 09a5267381..0000000000 --- a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf +++ /dev/null @@ -1,127 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots -# Location of configuration files used by MET applications -CONFIG_DIR = {METPLUS_BASE}/internal/tests/plotting/met_config - -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = 
{OUTPUT_BASE}/stat_analysis -MODEL2_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis - -[filename_templates] -STAT_ANALYSIS_DUMP_ROW_TEMPLATE = {model?fmt=%s}_{obtype?fmt=%s}_valid{valid_beg?fmt=%Y%m%d}to{valid_end?fmt=%Y%m%d}_valid{valid_hour_beg?fmt=%H%M}to{valid_hour_end?fmt=%H%M}Z_init{init_hour_beg?fmt=%H%M}to{init_hour_end?fmt=%H%M}Z_fcst_lead{fcst_lead?fmt=%s}_fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}{fcst_thresh?fmt=%s}{interp_mthd?fmt=%s}_obs{obs_var?fmt=%s}{obs_level?fmt=%s}{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_vxmask{vx_mask?fmt=%s}_dump_row.stat - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20190101 -VALID_END = 20190101 -# blank for HH format (two digit hour format, ex. 06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00, 06, 12, 18 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = MODEL_TEST1 -MODEL1_OBTYPE = MODEL_TEST1_ANL -MODEL1_REFERENCE_NAME = MODEL_TEST1 - -MODEL2 = TEST2_MODEL -MODEL2_OBTYPE = ANLYS2 -MODEL2_REFERENCE_NAME = TEST2_MODEL - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and 
FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -BOTH_VAR1_NAME = HGT -BOTH_VAR1_LEVELS = P1000, P850 -#FCST_VAR1_NAME = HGT -#FCST_VAR1_LEVELS = P1000, P850 - -STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped - -STAT_ANALYSIS_JOB_NAME = filter -STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file] - -# REQUIRED LISTS -MODEL_LIST = {MODEL1}, {MODEL2} -FCST_LEAD_LIST = 24, 48 -VX_MASK_LIST = NHX -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = pres -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py deleted file mode 100644 index ef6bcd72de..0000000000 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py +++ /dev/null @@ -1,407 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -import os - -import glob - -from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper -from metplus.util import handle_tmp_dir - -METPLUS_BASE = os.getcwd().split('/internal')[0] - - -def stat_analysis_wrapper(metplus_config): - """! Returns a default StatAnalysisWrapper with /path/to entries in the - metplus_system.conf and metplus_runtime.conf configuration - files. 
Subsequent tests can customize the final METplus configuration - to over-ride these /path/to values.""" - - # Default, empty StatAnalysisWrapper with some configuration values set - # to /path/to: - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test_plotting.conf')) - config = metplus_config(extra_configs) - handle_tmp_dir(config) - return StatAnalysisWrapper(config) - - -@pytest.mark.plotting -def test_set_lists_as_loop_or_group(metplus_config): - # Independently test that the lists that are set - # in the config file are being set - # accordingly based on their place - # in GROUP_LIST_ITEMS and LOOP_LIST_ITEMS - # and those not set are set to GROUP_LIST_ITEMS - st = stat_analysis_wrapper(metplus_config) - # Test 1 - expected_lists_to_group_items = ['FCST_INIT_HOUR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', - 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', - 'DESC_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - expected_lists_to_loop_items = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', - 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_LEVEL_LIST', 'OBS_LEVEL_LIST', - 'FCST_LEAD_LIST', 'VX_MASK_LIST'] - - config_dict = {} - config_dict['LOOP_ORDER'] = 'processes' - config_dict['OUTPUT_BASE_DIR'] = 'OUTPUT_BASE/stat_analysis' - config_dict['GROUP_LIST_ITEMS'] = ['FCST_INIT_HOUR_LIST'] - config_dict['LOOP_LIST_ITEMS'] = ['FCST_VALID_HOUR_LIST'] - config_dict['FCST_VAR_LIST'] = ['HGT'] - config_dict['OBS_VAR_LIST'] = ['HGT'] - config_dict['FCST_LEVEL_LIST'] = ['P1000', 'P500'] - config_dict['OBS_LEVEL_LIST'] = ['P1000', 'P500'] - config_dict['FCST_UNITS_LIST'] = [] - config_dict['OBS_UNITS_LIST'] = [] - config_dict['FCST_THRESH_LIST'] = [] - config_dict['OBS_THRESH_LIST'] = [] - config_dict['MODEL_LIST'] = ['MODEL_TEST1', 'MODEL_TEST2'] - config_dict['DESC_LIST'] = [] - config_dict['FCST_LEAD_LIST'] = ['24', '48'] - 
config_dict['OBS_LEAD_LIST'] = [] - config_dict['FCST_VALID_HOUR_LIST'] = ['00', '06', '12', '18'] - config_dict['FCST_INIT_HOUR_LIST'] = ['00', '06', '12', '18'] - config_dict['OBS_VALID_HOUR_LIST'] = [] - config_dict['OBS_INIT_HOUR_LIST'] = [] - config_dict['VX_MASK_LIST'] = ['NHX'] - config_dict['INTERP_MTHD_LIST'] = [] - config_dict['INTERP_PNTS_LIST'] = [] - config_dict['COV_THRESH_LIST'] = [] - config_dict['ALPHA_LIST'] = [] - config_dict['LINE_TYPE_LIST'] = ['SL1L2', 'VL1L2'] - - config_dict = st.set_lists_loop_or_group(config_dict) - - test_lists_to_loop_items = config_dict['LOOP_LIST_ITEMS'] - test_lists_to_group_items = config_dict['GROUP_LIST_ITEMS'] - - assert (all(elem in expected_lists_to_group_items - for elem in test_lists_to_group_items)) - assert (all(elem in expected_lists_to_loop_items - for elem in test_lists_to_loop_items)) - - -@pytest.mark.plotting -def test_get_output_filename(metplus_config): - # Independently test the building of - # the output file name - # using string template substitution# - # and test the values is - # as expected - st = stat_analysis_wrapper(metplus_config) - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_VAR'] = '"HGT"' - config_dict['FCST_LEVEL'] = '"P1000"' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '"NHX"' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '"HGT"' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '"240000"' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '"P1000"' - 
st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - - # Test 1 - lists_to_group = ['FCST_INIT_HOUR_LIST', 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'DESC_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', 'OBS_INIT_HOUR_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', - 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_LEVEL_LIST', 'OBS_LEVEL_LIST', - 'FCST_LEAD_LIST', 'VX_MASK_LIST'] - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL_valid20190101to20190101_valid0000to0000Z' - + '_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - + '_dump_row.stat' - ) - output_type = 'dump_row' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}_valid{valid_beg?fmt=%Y%m%d}' - 'to{valid_end?fmt=%Y%m%d}_valid{valid_hour_beg?fmt=%H%M}to' - '{valid_hour_end?fmt=%H%M}Z_init{init_hour_beg?fmt=%H%M}to' - '{init_hour_end?fmt=%H%M}Z_fcst_lead{fcst_lead?fmt=%s}_' - 'fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}{fcst_thresh?fmt=%s}' - '{interp_mthd?fmt=%s}_obs{obs_var?fmt=%s}{obs_level?fmt=%s}' - '{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_vxmask{vx_mask?fmt=%s}' - '_dump_row.stat' - - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - - -@pytest.mark.plotting -def test_filter_for_plotting(metplus_config): - # Test running of stat_analysis - st = stat_analysis_wrapper(metplus_config) - - # clear output directory for next run - output_dir = st.config.getdir('OUTPUT_BASE') + '/plotting/stat_analysis' - output_files = glob.glob(os.path.join(output_dir, '*')) - for output_file in output_files: - os.remove(output_file) - - # Test 1 - expected_filename1 = ( - 
st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename2 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename3 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename4 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename5 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename6 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename7 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename8 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - 
+'_dump_row.stat' - ) - expected_filename9 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename10 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename11 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename12 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename13 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename14 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename15 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename16 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - 
+'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename17 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename18 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename19 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename20 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename21 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename22 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename23 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename24 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - 
+'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename25 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename26 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename27 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename28 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename29 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename30 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename31 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename32 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - 
+'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename_list = [ expected_filename1, expected_filename2, - expected_filename3, expected_filename4, - expected_filename5, expected_filename6, - expected_filename7, expected_filename8, - expected_filename9, expected_filename10, - expected_filename11, expected_filename12, - expected_filename13, expected_filename14, - expected_filename15, expected_filename16, - expected_filename17, expected_filename18, - expected_filename19, expected_filename20, - expected_filename21, expected_filename22, - expected_filename23, expected_filename24, - expected_filename25, expected_filename26, - expected_filename27, expected_filename28, - expected_filename29, expected_filename30, - expected_filename31, expected_filename32 ] - - # remove expected files before running - for expected_filename in expected_filename_list: - if os.path.exists(expected_filename): - os.remove(expected_filename) - - st.c_dict['DATE_TYPE'] = 'VALID' - st.c_dict['VALID_BEG'] = '20190101' - st.c_dict['VALID_END'] = '20190101' - st.c_dict['INIT_BEG'] = '' - st.c_dict['INIT_END'] = '' - st.c_dict['DATE_BEG'] = st.c_dict['VALID_BEG'] - st.c_dict['DATE_END'] = st.c_dict['VALID_END'] - - st.run_stat_analysis() - ntest_files = len( - os.listdir(st.config.getdir('OUTPUT_BASE') - +'/plotting/stat_analysis') - ) - assert ntest_files == 32 - for expected_filename in expected_filename_list: - assert os.path.exists(expected_filename) diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py index 4c776565d7..6eeef89cfd 100644 --- a/metplus/util/config_metplus.py +++ b/metplus/util/config_metplus.py @@ -1790,11 +1790,6 @@ def get_process_list(config): "may be invalid.") wrapper_name = process_name - # if MakePlots is in process list, remove it because - # it will be called directly from StatAnalysis - if wrapper_name == 'MakePlots': - continue - out_process_list.append((wrapper_name, instance)) return 
out_process_list diff --git a/metplus/util/doc_util.py b/metplus/util/doc_util.py index 44ee50ad8a..99275c6a06 100755 --- a/metplus/util/doc_util.py +++ b/metplus/util/doc_util.py @@ -18,7 +18,6 @@ 'griddiag': 'GridDiag', 'gridstat': 'GridStat', 'ioda2nc': 'IODA2NC', - 'makeplots': 'MakePlots', 'metdbload': 'METDbLoad', 'mode': 'MODE', 'mtd': 'MTD', diff --git a/metplus/wrappers/__init__.py b/metplus/wrappers/__init__.py index 7f29a2ef4b..2b556ebd4e 100644 --- a/metplus/wrappers/__init__.py +++ b/metplus/wrappers/__init__.py @@ -9,7 +9,6 @@ plotting_wrappers = [ 'tcmpr_plotter_wrapper', 'cyclone_plotter_wrapper', - 'make_plots_wrapper', ] # import classes that other wrappers import diff --git a/metplus/wrappers/make_plots_wrapper.py b/metplus/wrappers/make_plots_wrapper.py deleted file mode 100755 index 08716e4d81..0000000000 --- a/metplus/wrappers/make_plots_wrapper.py +++ /dev/null @@ -1,309 +0,0 @@ -''' -Program Name: make_plots_wrapper.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper run_all_times to make plots -History Log: Fourth version -Usage: make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import logging -import os -import copy -import re -import subprocess -import datetime -import itertools - -from ..util import getlist -from ..util import met_util as util -from ..util import parse_var_list -from . import CommandBuilder - -# handle if module can't be loaded to run wrapper -WRAPPER_CANNOT_RUN = False -EXCEPTION_ERR = '' -try: - from ush.plotting_scripts import plot_util -except Exception as err_msg: - WRAPPER_CANNOT_RUN = True - EXCEPTION_ERR = err_msg - -class MakePlotsWrapper(CommandBuilder): - """! 
Wrapper to used to filter make plots from MET data - """ - accepted_verif_lists = { - 'grid2grid': { - 'pres': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_date_by_level.py', - 'plot_lead_by_level.py'], - 'anom': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_lead_by_date.py'], - 'sfc': ['plot_time_series.py', - 'plot_lead_average.py'], - }, - 'grid2obs': { - 'upper_air': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_stat_by_level.py', - 'plot_lead_by_level.py'], - 'conus_sfc': ['plot_time_series.py', - 'plot_lead_average.py'], - }, - # precip uses the same scripts for any verif case, so this value - # is a list instead of a dictionary - 'precip': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_threshold_average.py', - 'plot_threshold_by_lead.py'], - } - - add_from_c_dict_list = [ - 'VERIF_CASE', 'VERIF_TYPE', 'INPUT_BASE_DIR', 'OUTPUT_BASE_DIR', - 'SCRIPTS_BASE_DIR', 'DATE_TYPE', 'VALID_BEG', 'VALID_END', - 'INIT_BEG', 'INIT_END', 'AVERAGE_METHOD', 'CI_METHOD', - 'VERIF_GRID', 'EVENT_EQUALIZATION', 'LOG_METPLUS', 'LOG_LEVEL' - ] - - def __init__(self, config, instance=None): - self.app_path = 'python' - self.app_name = 'make_plots' - super().__init__(config, instance=instance) - - if WRAPPER_CANNOT_RUN: - self.log_error(f"There was a problem importing modules: {EXCEPTION_ERR}\n") - return - - def get_command(self): - - if not self.plotting_script: - self.log_error("No plotting script specified") - return None - - cmd = f"{self.app_path} {self.plotting_script}" - - return cmd - - def create_c_dict(self): - """! Create a data structure (dictionary) that contains all the - values set in the configuration files that are common for - make_plots_wrapper.py. 
- - Args: - - Returns: - c_dict - a dictionary containing the settings in the - configuration files unique to the wrapper - """ - c_dict = super().create_c_dict() - c_dict['VERBOSITY'] = ( - self.config.getstr('config', 'LOG_MAKE_PLOTS_VERBOSITY', - c_dict['VERBOSITY']) - ) - c_dict['LOOP_ORDER'] = self.config.getstr('config', 'LOOP_ORDER') - c_dict['INPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_INPUT_DIR') - c_dict['OUTPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_OUTPUT_DIR') - c_dict['SCRIPTS_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_SCRIPTS_DIR') - c_dict['DATE_TYPE'] = self.config.getstr('config', 'DATE_TYPE') - c_dict['VALID_BEG'] = self.config.getstr('config', 'VALID_BEG', '') - c_dict['VALID_END'] = self.config.getstr('config', 'VALID_END', '') - c_dict['INIT_BEG'] = self.config.getstr('config', 'INIT_BEG', '') - c_dict['INIT_END'] = self.config.getstr('config', 'INIT_END', '') - c_dict['GROUP_LIST_ITEMS'] = getlist( - self.config.getstr('config', 'GROUP_LIST_ITEMS') - ) - c_dict['LOOP_LIST_ITEMS'] = getlist( - self.config.getstr('config', 'LOOP_LIST_ITEMS') - ) - c_dict['VAR_LIST'] = parse_var_list(self.config) - c_dict['MODEL_LIST'] = getlist( - self.config.getstr('config', 'MODEL_LIST', '') - ) - c_dict['DESC_LIST'] = getlist( - self.config.getstr('config', 'DESC_LIST', '') - ) - c_dict['FCST_LEAD_LIST'] = getlist( - self.config.getstr('config', 'FCST_LEAD_LIST', '') - ) - c_dict['OBS_LEAD_LIST'] = getlist( - self.config.getstr('config', 'OBS_LEAD_LIST', '') - ) - c_dict['FCST_VALID_HOUR_LIST'] = getlist( - self.config.getstr('config', 'FCST_VALID_HOUR_LIST', '') - ) - c_dict['FCST_INIT_HOUR_LIST'] = getlist( - self.config.getstr('config', 'FCST_INIT_HOUR_LIST', '') - ) - c_dict['OBS_VALID_HOUR_LIST'] = getlist( - self.config.getstr('config', 'OBS_VALID_HOUR_LIST', '') - ) - c_dict['OBS_INIT_HOUR_LIST'] = getlist( - self.config.getstr('config', 'OBS_INIT_HOUR_LIST', '') - ) - c_dict['VX_MASK_LIST'] = getlist( - self.config.getstr('config', 
'VX_MASK_LIST', '') - ) - c_dict['INTERP_MTHD_LIST'] = getlist( - self.config.getstr('config', 'INTERP_MTHD_LIST', '') - ) - c_dict['INTERP_PNTS_LIST'] = getlist( - self.config.getstr('config', 'INTERP_PNTS_LIST', '') - ) - c_dict['COV_THRESH_LIST'] = getlist( - self.config.getstr('config', 'COV_THRESH_LIST', '') - ) - c_dict['ALPHA_LIST'] = getlist( - self.config.getstr('config', 'ALPHA_LIST', '') - ) - c_dict['LINE_TYPE_LIST'] = getlist( - self.config.getstr('config', 'LINE_TYPE_LIST', '') - ) - c_dict['USER_SCRIPT_LIST'] = getlist( - self.config.getstr('config', 'MAKE_PLOTS_USER_SCRIPT_LIST', '') - ) - c_dict['VERIF_CASE'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_CASE', '') - - if c_dict['VERIF_CASE'] not in self.accepted_verif_lists: - self.log_error(self.c_dict['VERIF_CASE'] + " is not an" - + "an accepted MAKE_PLOTS_VERIF_CASE " - + "option. Options are " - + ', '.join(self.accepted_verif_lists.keys())) - - c_dict['VERIF_TYPE'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_TYPE', '') - - # if not precip case, check that verif type is an accepted verif type - if c_dict['VERIF_CASE'] != 'precip' and c_dict['VERIF_TYPE'] not in ( - self.accepted_verif_lists.get(c_dict['VERIF_CASE'], []) - ): - print(f"VERIF CASE: {c_dict['VERIF_CASE']}") - accepted_types = self.accepted_verif_lists.get(c_dict['VERIF_CASE']).keys() - self.log_error(f"{c_dict['VERIF_TYPE']} is not " - "an accepted MAKE_PLOTS_VERIF_TYPE " - "option for MAKE_PLOTS_VERIF_CASE " - f"= {c_dict['VERIF_CASE']}. 
Options " - f"are {', '.join(accepted_types)}") - - if not c_dict['USER_SCRIPT_LIST'] and not(c_dict['VERIF_CASE'] or - c_dict['VERIF_TYPE']): - self.log_error("Please defined either " - "MAKE_PLOTS_VERIF_CASE and " - "MAKE_PLOTS_VERIF_TYPE, or " - "MAKE_PLOTS_USER_SCRIPT_LIST") - - c_dict['STATS_LIST'] = getlist( - self.config.getstr('config', 'MAKE_PLOTS_STATS_LIST', '') - ) - c_dict['AVERAGE_METHOD'] = self.config.getstr( - 'config','MAKE_PLOTS_AVERAGE_METHOD', 'MEAN' - ) - c_dict['CI_METHOD'] = self.config.getstr('config', - 'MAKE_PLOTS_CI_METHOD', - 'NONE') - c_dict['VERIF_GRID'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_GRID') - c_dict['EVENT_EQUALIZATION'] = ( - self.config.getstr('config', 'MAKE_PLOTS_EVENT_EQUALIZATION') - ) - c_dict['LOG_METPLUS'] = self.config.getstr('config', 'LOG_METPLUS') - c_dict['LOG_LEVEL'] = self.config.getstr('config', 'LOG_LEVEL') - - # Get MET version used to run stat_analysis - c_dict['MET_VERSION'] = str(self.get_met_version()) - - return c_dict - - def setup_output_base(self): - # Set up output base - output_base_dir = self.c_dict['OUTPUT_BASE_DIR'] - output_base_data_dir = os.path.join(output_base_dir, 'data') - output_base_images_dir = os.path.join(output_base_dir, 'images') - if not os.path.exists(output_base_dir): - util.mkdir_p(output_base_dir) - util.mkdir_p(output_base_data_dir) - util.mkdir_p(output_base_images_dir) - else: - if os.path.exists(output_base_data_dir): - if len(output_base_data_dir) > 0: - for rmfile in os.listdir(output_base_data_dir): - os.remove(os.path.join(output_base_data_dir,rmfile)) - - def get_met_version(self): - stat_analysis_exe = os.path.join(self.config.getdir('MET_BIN_DIR'), - 'stat_analysis') - p = subprocess.Popen([stat_analysis_exe, "--version"], - stdout=subprocess.PIPE) - out, err = p.communicate() - out = out.decode(encoding='utf-8', errors='strict') - for line in out.split('\n'): - if 'MET Version:' in line: - met_verison_line = line - met_version_str = ( - 
met_verison_line.partition('MET Version:')[2].split('V')[1] - ) - if len(met_version_str) == 3: - met_version = float(met_version_str) - else: - met_version = float(met_version_str.rpartition('.')[0]) - - return met_version - - def create_plots(self, runtime_settings_dict_list): - - if self.c_dict['USER_SCRIPT_LIST']: - self.logger.info("Running plots for user specified list of " - "scripts.") - - elif (self.c_dict['VERIF_CASE'] and self.c_dict['VERIF_TYPE']): - self.logger.info("Running plots for VERIF_CASE = " - +self.c_dict['VERIF_CASE']+", " - +"VERIF_TYPE = " - +self.c_dict['VERIF_TYPE']) - - self.setup_output_base() - - if self.c_dict['USER_SCRIPT_LIST']: - scripts_to_run = self.c_dict['USER_SCRIPT_LIST'] - elif self.c_dict['VERIF_CASE'] == 'precip': - scripts_to_run = self.accepted_verif_lists.get(self.c_dict['VERIF_CASE']) - else: - scripts_to_run = self.accepted_verif_lists.get(self.c_dict['VERIF_CASE'])\ - .get(self.c_dict['VERIF_TYPE']) - - # Loop over run settings. - for runtime_settings_dict in runtime_settings_dict_list: - # set environment variables - for name, value in runtime_settings_dict.items(): - if name == 'JOBS': - continue - self.add_env_var(name, value.replace('"', '')) - - for key in self.add_from_c_dict_list: - if key not in runtime_settings_dict: - self.add_env_var(key, self.c_dict[key].replace('"', '')) - - self.add_env_var('MET_VERSION', self.c_dict['MET_VERSION']) - - # obtype env var is named differently in StatAnalysis wrapper - self.add_env_var('MODEL_OBTYPE', runtime_settings_dict['OBTYPE'].replace('"', '')) - - self.add_env_var('STATS', - ', '.join(self.c_dict['STATS_LIST']).replace('"', '')) - - # send environment variables to logger - self.set_environment_variables() - - for script in scripts_to_run: - self.plotting_script = ( - os.path.join(self.c_dict['SCRIPTS_BASE_DIR'], - script) - ) - - self.build() - self.clear() diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 
73283e6a13..98a20efa2a 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -98,23 +98,7 @@ class StatAnalysisWrapper(CommandBuilder): 'LINE_TYPE_LIST', ] + format_lists + field_lists - force_group_for_make_plots_lists = [ - 'MODEL_LIST', - 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', - 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', - 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', - 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', - ] - list_categories = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS'] - list_categories_make_plots = [ - 'GROUP_LIST_ITEMS_MAKE_PLOTS', - 'LOOP_LIST_ITEMS_MAKE_PLOTS' - ] def __init__(self, config, instance=None): self.app_path = os.path.join(config.getdir('MET_BIN_DIR', ''), @@ -224,24 +208,6 @@ def create_c_dict(self): # read all field lists and check if they are all empty c_dict['all_field_lists_empty'] = self.read_field_lists_from_config(c_dict) - - # check if MakePlots is in process list and set boolean - # MakePlots is removed from the list in met_util.get_process_list, so - # need to read the conf value again - self.runMakePlots = 'MakePlots' in self.config.getstr('config', 'PROCESS_LIST') - if self.runMakePlots: - # only import MakePlots wrappers if it will be used - from .make_plots_wrapper import MakePlotsWrapper, WRAPPER_CANNOT_RUN - if WRAPPER_CANNOT_RUN: - self.log_error("Cannot import MakePlots wrapper! Requires pandas and numpy") - else: - self.check_MakePlots_config(c_dict) - - # create MakePlots wrapper instance - self.MakePlotsWrapper = MakePlotsWrapper(self.config) - if not self.MakePlotsWrapper.isOK: - self.log_error("MakePlotsWrapper was not initialized correctly.") - c_dict['VAR_LIST'] = parse_var_list(self.config) c_dict['MODEL_INFO_LIST'] = self.parse_model_info() @@ -300,30 +266,11 @@ def c_dict_error_check(self, c_dict): "[FCST/OBS]_VAR_[NAME/LEVELS]. 
Use " "one or the other formats to run") - # if no var list is found, other lists must be set to run MakePlots - elif not c_dict['VAR_LIST'] and c_dict['all_field_lists_empty'] and self.runMakePlots: - self.log_error("No field information found. Must define fields to " - "process with either [FCST/OBS]_VAR_LIST or " - "[FCST/OBS]_VAR_[NAME/LEVELS]") - # if MODEL_LIST was not set in config, populate it from the model info list # if model info list is also not set, report and error if not c_dict['MODEL_LIST'] and not c_dict['MODEL_INFO_LIST']: self.log_error("No model information was found.") - # if running MakePlots and model list in group list, error and exit - if self.runMakePlots: - if 'MODEL_LIST' in c_dict['GROUP_LIST_ITEMS']: - self.log_error("Cannot group MODELS if running MakePlots. Remove " - "MODEL_LIST from LOOP_LIST_ITEMS") - - if len(c_dict['MODEL_LIST']) > 8: - self.log_error("Number of models for plotting limited to 8.") - - # set forMakePlots to False to begin. When gathering settings to - # send to MakePlots wrapper, this will be set to True - self.forMakePlots = False - return c_dict def read_field_lists_from_config(self, field_dict): @@ -349,64 +296,8 @@ def read_field_lists_from_config(self, field_dict): return all_empty - def check_MakePlots_config(self, c_dict): - - # the following are specific to running MakePlots wrapper - bad_config_variable_list = [ - 'FCST_VAR_LIST', 'FCST_LEVEL_LIST', - 'FCST_THRESH_LIST', 'FCST_UNITS_LIST', - 'OBS_VAR_LIST', 'OBS_LEVEL_LIST', - 'OBS_THRESH_LIST', 'OBS_UNITS_LIST' - ] - for bad_config_variable in bad_config_variable_list: - if c_dict[bad_config_variable]: - self.log_error("Bad config option for running StatAnalysis " - "followed by MakePlots. 
Please remove " - +bad_config_variable+" and set using FCST/OBS_VARn") - - loop_group_accepted_options = [ - 'FCST_VALID_HOUR_LIST', 'FCST_INIT_HOUR_LIST', - 'OBS_VALID_HOUR_LIST', 'OBS_INIT_HOUR_LIST' - ] - for config_list in c_dict['GROUP_LIST_ITEMS']: - if config_list not in loop_group_accepted_options: - self.log_error("Bad config option for running StatAnalysis " - +"followed by MakePlots. Only accepted " - +"values in GROUP_LIST_ITEMS are " - +"FCST_VALID_HOUR_LIST, " - +"FCST_INIT_HOUR_LIST, " - +"OBS_VALID_HOUR_LIST, " - +"OBS_INIT_HOUR_LIST. " - +"Found "+config_list) - - for config_list in c_dict['LOOP_LIST_ITEMS']: - if config_list not in loop_group_accepted_options: - self.log_error("Bad config option for running StatAnalysis " - +"followed by MakePlots. Only accepted " - +"values in LOOP_LIST_ITEMS are " - +"FCST_VALID_HOUR_LIST, " - +"FCST_INIT_HOUR_LIST, " - +"OBS_VALID_HOUR_LIST, " - +"OBS_INIT_HOUR_LIST. " - +"Found "+config_list) - - # Do checks for required configuration file options that are - # defined by user. - required_config_variable_list = [ - 'VX_MASK_LIST', 'FCST_LEAD_LIST', 'LINE_TYPE_LIST' - ] - for required_config_variable in required_config_variable_list: - if len(c_dict[required_config_variable]) == 0: - self.log_error(required_config_variable+" has no items. " - +"This list must have items to run " - +"StatAnalysis followed by MakePlots.") - - # if MakePlots is run but -dump_row is not found in the job args, error - if not any([item for item in c_dict['JOBS'] if '-dump_row' in item]): - self.log_error("Must include -dump_row in at least one " - "STAT_ANALYSIS_JOB if running MakePlots") - - def list_to_str(self, list_of_values, add_quotes=True): + @staticmethod + def list_to_str(list_of_values, add_quotes=True): """! Turn a list of values into a single string so it can be set to an environment variable and read by the MET stat_analysis config file. 
@@ -448,33 +339,17 @@ def set_lists_loop_or_group(self, c_dict): the list names whose items are being looped over) """ # get list of config variables not found in either GROUP_LIST_ITEMS or LOOP_LIST_ITEMS - missing_config_list = [conf for conf in self.expected_config_lists if conf not in c_dict['GROUP_LIST_ITEMS']] - missing_config_list = [conf for conf in missing_config_list if conf not in c_dict['LOOP_LIST_ITEMS']] - found_config_list = [conf for conf in self.expected_config_lists if conf not in missing_config_list] + missing_config_list = [conf for conf in self.expected_config_lists + if conf not in c_dict['GROUP_LIST_ITEMS']] + missing_config_list = [conf for conf in missing_config_list + if conf not in c_dict['LOOP_LIST_ITEMS']] + found_config_list = [conf for conf in self.expected_config_lists + if conf not in missing_config_list] # loop through lists not found in either loop or group lists for missing_config in missing_config_list: - # if running MakePlots - if self.runMakePlots: - - # if LINE_TYPE_LIST is missing, add it to group list - if missing_config == 'LINE_TYPE_LIST': - c_dict['GROUP_LIST_ITEMS'].append(missing_config) - - # else if list in config_dict is empty, warn and add to group list - elif not c_dict[missing_config]: - self.logger.warning(missing_config + " is empty, " - + "will be treated as group.") - c_dict['GROUP_LIST_ITEMS'].append(missing_config) - - # otherwise add to loop list - else: - c_dict['LOOP_LIST_ITEMS'].append(missing_config) - - # if not running MakePlots, just add missing list to group list - else: - c_dict['GROUP_LIST_ITEMS'].append(missing_config) + c_dict['GROUP_LIST_ITEMS'].append(missing_config) # loop through lists found in either loop or group lists originally for found_config in found_config_list: @@ -490,16 +365,6 @@ def set_lists_loop_or_group(self, c_dict): self.logger.debug("Items in these lists will be looped over: " + ', '.join(c_dict['LOOP_LIST_ITEMS'])) - # if running MakePlots, create new group and loop 
lists based on - # the criteria for running that wrapper - if self.runMakePlots: - c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'] = list(c_dict['GROUP_LIST_ITEMS']) - c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'] = list(c_dict['LOOP_LIST_ITEMS']) - for force_group_list in self.force_group_for_make_plots_lists: - if force_group_list in c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS']: - c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'].remove(force_group_list) - c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'].append(force_group_list) - return c_dict def format_thresh(self, thresh): @@ -549,13 +414,6 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): list_name = loop_list.replace('_LIST', '') stringsub_dict_keys.append(list_name.lower()) for group_list in lists_to_group: - # if setting up MakePlots, skip adding forced - # group lists so they will remain templates - # to be filled in by the plotting scripts - if (self.forMakePlots and - group_list in self.force_group_for_make_plots_lists): - continue - list_name = group_list.replace('_LIST', '') stringsub_dict_keys.append(list_name.lower()) @@ -809,13 +667,10 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): for group_list in lists_to_group: list_name = group_list.replace('_LIST', '') list_name_value = ( - config_dict[list_name].replace('"', '').replace(' ', '') \ + config_dict[list_name].replace('"', '').replace(' ', '') .replace(',', '_').replace('*', 'ALL') ) if 'THRESH' in list_name: - if (self.forMakePlots and - group_list in self.force_group_for_make_plots_lists): - continue thresh_letter = self.format_thresh( config_dict[list_name] @@ -920,11 +775,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict['init_hour'] = ( stringsub_dict['init_hour_end'] ) - elif not (self.forMakePlots and - group_list in self.force_group_for_make_plots_lists): - # if setting up MakePlots, skip adding forced - # group lists so they will remain templates - # to be filled in by the plotting 
scripts + else: stringsub_dict[list_name.lower()] = list_name_value nkeys_end = len(stringsub_dict_keys) @@ -968,105 +819,38 @@ def get_output_filename(self, output_type, filename_template, lists_to_group, config_dict) if filename_type == 'default': - if (self.runMakePlots and output_type == 'dump_row'): - filename_template_prefix = ( - filename_template+date_type.lower() - +'{'+date_type.lower()+'_beg?fmt=%Y%m%d}' - +'to{'+date_type.lower()+'_end?fmt=%Y%m%d}_' - ) - if (stringsub_dict['valid_hour_beg'] != '' - and stringsub_dict['valid_hour_end'] != ''): - filename_template_prefix+=( - 'valid{valid_hour_beg?fmt=%H%M}to' - +'{valid_hour_end?fmt=%H%M}Z_' - ) - else: - filename_template_prefix+=( - 'fcst_valid{fcst_valid_hour_beg?fmt=%H%M}to' - +'{fcst_valid_hour_end?fmt=%H%M}Z_' - 'obs_valid{obs_valid_hour_beg?fmt=%H%M}to' - +'{obs_valid_hour_end?fmt=%H%M}Z_' - ) - if (stringsub_dict['init_hour_beg'] != '' - and stringsub_dict['init_hour_end'] != ''): - filename_template_prefix+=( - 'init{init_hour_beg?fmt=%H%M}to' - +'{init_hour_end?fmt=%H%M}Z' - ) - else: - filename_template_prefix+=( - 'fcst_init{fcst_init_hour_beg?fmt=%H%M}to' - +'{fcst_init_hour_end?fmt=%H%M}Z_' - 'obs_init{obs_init_hour_beg?fmt=%H%M}to' - +'{obs_init_hour_end?fmt=%H%M}Z' - ) - filename_template_prefix+=( - '_fcst_lead{fcst_lead?fmt=%s}' - +'_fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}' - +'{fcst_thresh?fmt=%s}{interp_mthd?fmt=%s}_' - +'obs{obs_var?fmt=%s}{obs_level?fmt=%s}' - +'{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_' - +'vxmask{vx_mask?fmt=%s}' + + if date_beg == date_end: + filename_template = ( + filename_template+date_type.lower()+date_beg ) - if 'DESC_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_desc{desc?fmt=%s}' - ) - if 'OBS_LEAD_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_obs_lead{obs_lead?fmt=%s}' - ) - if 'INTERP_PNTS_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - 
+'_interp_pnts{interp_pnts?fmt=%s}' - ) - if 'COV_THRESH_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_cov_thresh{cov_thresh?fmt=%s}' - ) - if 'ALPHA_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_alpha{alpha?fmt=%s}' - ) - filename_template = filename_template_prefix else: - if date_beg == date_end: - filename_template = ( - filename_template+date_type.lower()+date_beg - ) - else: - filename_template = ( - filename_template+date_type.lower()+ - date_beg+'to'+date_end - ) - for loop_list in lists_to_loop: - if loop_list != 'MODEL_LIST': - list_name = loop_list.replace('_LIST', '') - if 'HOUR' in list_name: - filename_template = ( - filename_template+'_' - +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '')+'Z' - ) - else: - filename_template = ( - filename_template+'_' - +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '') - ) + filename_template = ( + filename_template+date_type.lower()+ + date_beg+'to'+date_end + ) + for loop_list in lists_to_loop: + if loop_list != 'MODEL_LIST': + list_name = loop_list.replace('_LIST', '') + if 'HOUR' in list_name: + filename_template = ( + filename_template+'_' + +list_name.replace('_', '').lower() + +config_dict[list_name].replace('"', '')+'Z' + ) + else: + filename_template = ( + filename_template+'_' + +list_name.replace('_', '').lower() + +config_dict[list_name].replace('"', '') + ) filename_template += '_' + output_type + '.stat' self.logger.debug("Building "+output_type+" filename from " +filename_type+" template: "+filename_template) output_filename = do_string_sub(filename_template, - **stringsub_dict, - skip_missing_tags=self.forMakePlots) + **stringsub_dict) return output_filename def get_lookin_dir(self, dir_path, lists_to_loop, lists_to_group, config_dict): @@ -1315,15 +1099,10 @@ def parse_model_info(self): ) model_dump_row_filename_type = model_filename_type elif output_type == 
'OUT_STAT': - # if MakePlots is run - if self.runMakePlots: - model_out_stat_filename_template = 'NA' - model_out_stat_filename_type = 'NA' - else: - model_out_stat_filename_template = ( - model_filename_template - ) - model_out_stat_filename_type = model_filename_type + model_out_stat_filename_template = ( + model_filename_template + ) + model_out_stat_filename_type = model_filename_type mod = {} mod['name'] = model_name @@ -1404,12 +1183,8 @@ def get_runtime_settings_dict_list(self): # Loop over run settings. formatted_runtime_settings_dict_list = [] for runtime_settings_dict in runtime_settings_dict_list: - if self.forMakePlots: - loop_lists = c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'] - group_lists = c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'] - else: - loop_lists = c_dict['LOOP_LIST_ITEMS'] - group_lists = c_dict['GROUP_LIST_ITEMS'] + loop_lists = c_dict['LOOP_LIST_ITEMS'] + group_lists = c_dict['GROUP_LIST_ITEMS'] # Set up stat_analysis -lookin argument, model and obs information # and stat_analysis job. @@ -1457,10 +1232,6 @@ def get_runtime_settings(self, c_dict): group_lists = c_dict['GROUP_LIST_ITEMS'] loop_lists = c_dict['LOOP_LIST_ITEMS'] - if self.forMakePlots: - group_lists = c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'] - loop_lists = c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'] - runtime_setup_dict = {} # Fill setup dictionary for MET config variable name # and its value as a string for group lists. 
@@ -1468,15 +1239,7 @@ def get_runtime_settings(self, c_dict): runtime_setup_dict_name = group_list.replace('_LIST', '') add_quotes = False if 'THRESH' in group_list else True - # if preparing for MakePlots, change - # commas to _ and * to ALL in list items - if self.forMakePlots: - formatted_list = [] - for format_list in c_dict[group_list]: - formatted_list.append(format_list.replace(',', '_') - .replace('*', 'ALL')) - else: - formatted_list = c_dict[group_list] + formatted_list = c_dict[group_list] runtime_setup_dict[runtime_setup_dict_name] = ( [self.list_to_str(formatted_list, add_quotes=add_quotes)] @@ -1608,35 +1371,6 @@ def get_c_dict_list(self): c_dict_list.append(c_dict) - # if preparing for MakePlots, combine levels and thresholds for each name - if self.forMakePlots: - output_c_dict_list = [] - for c_dict in c_dict_list: - if c_dict['index'] not in [conf['index'] for conf in output_c_dict_list]: - output_c_dict_list.append(c_dict) - else: - for output_dict in output_c_dict_list: - if c_dict['index'] == output_dict['index']: - - for level in c_dict['FCST_LEVEL_LIST']: - if level not in output_dict['FCST_LEVEL_LIST']: - output_dict['FCST_LEVEL_LIST'].append(level) - - for level in c_dict['OBS_LEVEL_LIST']: - if level not in output_dict['OBS_LEVEL_LIST']: - output_dict['OBS_LEVEL_LIST'].append(level) - - for thresh in c_dict['FCST_THRESH_LIST']: - if thresh not in output_dict['FCST_THRESH_LIST']: - output_dict['FCST_THRESH_LIST'].append(thresh) - - for thresh in c_dict['OBS_THRESH_LIST']: - if thresh not in output_dict['OBS_THRESH_LIST']: - output_dict['OBS_THRESH_LIST'].append(thresh) - - - return output_c_dict_list - return c_dict_list def add_other_lists_to_c_dict(self, c_dict): @@ -1646,9 +1380,6 @@ def add_other_lists_to_c_dict(self, c_dict): """ # add group and loop lists lists_to_add = self.list_categories - if self.runMakePlots: - lists_to_add.extend(self.list_categories_make_plots) - for list_category in lists_to_add: list_items = 
self.c_dict[list_category] if list_category not in c_dict: @@ -1683,20 +1414,19 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict, loop_lists, grou # set MODEL and OBTYPE to single item to find lookin dir runtime_settings_dict['MODEL'] = '"'+model_info['name']+'"' runtime_settings_dict['OBTYPE'] = '"'+model_info['obtype']+'"' - # don't get lookin dir if getting settings for MakePlots - if not self.forMakePlots: - lookin_dirs.append(self.get_lookin_dir(model_info['dir'], - loop_lists, - group_lists, - runtime_settings_dict, - ) - ) + + lookin_dirs.append(self.get_lookin_dir(model_info['dir'], + loop_lists, + group_lists, + runtime_settings_dict, + ) + ) # set lookin dir command line argument runtime_settings_dict['LOOKIN_DIR'] = ' '.join(lookin_dirs) # error and return None if lookin dir is empty - if not self.forMakePlots and not runtime_settings_dict['LOOKIN_DIR']: + if not runtime_settings_dict['LOOKIN_DIR']: self.log_error("No value found for lookin dir") return None @@ -1739,29 +1469,12 @@ def run_stat_analysis(self): or initialization dates for a job defined by the user. 
""" - self.forMakePlots = False - runtime_settings_dict_list = self.get_runtime_settings_dict_list() if not runtime_settings_dict_list: return False self.run_stat_analysis_job(runtime_settings_dict_list) - # if running MakePlots, get its runtime_settings_dict_list and call - if self.runMakePlots: - self.logger.debug("Preparing settings to pass to MakePlots wrapper") - self.forMakePlots = True - runtime_settings_dict_list = ( - self.get_runtime_settings_dict_list() - ) - if not runtime_settings_dict_list: - return False - - self.MakePlotsWrapper.create_plots(runtime_settings_dict_list) - if self.MakePlotsWrapper.errors: - self.log_error("MakePlots produced " - f"{self.MakePlotsWrapper.errors} errors.") - return True def run_stat_analysis_job(self, runtime_settings_dict_list): diff --git a/ush/plotting_scripts/plot_date_by_level.py b/ush/plotting_scripts/plot_date_by_level.py deleted file mode 100644 index 291759341b..0000000000 --- a/ush/plotting_scripts/plot_date_by_level.py +++ /dev/null @@ -1,819 +0,0 @@ -''' -Name: plot_date_by_level.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper - run_all_times to make date-pressure plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - - -import plot_util as plot_util - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = 
os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] 
-log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) - -for level_list in fcst_var_level_list: - for level in level_list: - if not level.startswith('P'): - logger.warning(f"Forecast level value ({level}) expected " - "to be in pressure, i.e. P500. 
Exiting.") - sys.exit(0) - -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in 
vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. 
Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and 
date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': '+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead+" " - +"and forecast variable "+fcst_var_name+" " - +fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - 
+'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Set up expected date in MET .stat file and date plot information - plot_time_dates, expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from METplus " - +"conf file has length of 0. Not enough information " - +"was provided to build date information. 
Please check " - +"provided VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - # Build date by forecst level grid for plotting - fcst_var_levels_int = np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - xmesh, ymesh = np.meshgrid(plot_time_dates, fcst_var_levels_int) - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] - model_level_now_data_index = pd.MultiIndex.from_product( - [ - [model_plot_name], [fcst_var_level], - expected_stat_file_dates - ], - names=['model_plot_name', 'levels', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, -# model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - 
logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_level_now_data = ( - pd.DataFrame(np.nan, - index=model_level_now_data_index, - columns=[ 'TOTAL' ]) - ) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_level_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_level_now_stat_file_data.rename( - columns=dict(zip( - model_level_now_stat_file_data \ - .columns[:nbase_columns], - stat_file_base_columns - )), inplace=True - ) - line_type = model_level_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_level_now_stat_file_data.rename( - columns=dict(zip( - model_level_now_stat_file_data \ - .columns[nbase_columns:], - stat_file_line_type_columns - )), inplace=True - ) - model_level_now_stat_file_data_fcstvaliddates = ( - model_level_now_stat_file_data.loc[:] \ - ['FCST_VALID_BEG'].values - ) - model_level_now_data = ( - pd.DataFrame(np.nan, index=model_level_now_data_index, - columns=stat_file_line_type_columns) - ) - model_level_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_level_now_stat_file_data \ - .loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - model_level_now_stat_file_data \ - .loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in expected_stat_file_dates: - if expected_date in \ - model_level_now_stat_file_data_fcstvaliddates: - matching_date_idx = ( - model_level_now_stat_file_data_fcstvaliddates \ - 
.tolist().index(expected_date) - ) - model_level_now_stat_file_data_indexed = ( - model_level_now_stat_file_data \ - .loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_level_now_data.loc[ - (model_plot_name, - fcst_var_level, - expected_date) - ][col] = ( - model_level_now_stat_file_data_indexed \ - .loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_level_now_data = ( - pd.DataFrame(np.nan, - index=model_level_now_data_index, - columns=[ 'TOTAL' ]) - ) - if vl > 0: - model_now_data = pd.concat( - [model_now_data, model_level_now_data] - ) - else: - model_now_data = model_level_now_data - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - if fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - # Calculate statistics and plots - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == "True": - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0])): - for vl in range(len(fcst_var_level_list)): - stat_values_array[l,:,vl,:] = ( - np.ma.mask_cols(stat_values_array[l,:,vl,:]) - ) - np.ma.set_fill_value(stat_values_array, np.nan) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - 
fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_stat_values_array = stat_values_array[1,0,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=20) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_stat_values_array, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = 
stat_values_array[0,model_idx,:,:] - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=20) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_stat_values_array - - stat_values_array[1,model_idx,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - 
logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_stat_values_array - ) - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_stat_values_array = model_stat_values_array - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_stat_values_array - model1_stat_values_array - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - 
matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+', '+fcst_lead_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_average.py b/ush/plotting_scripts/plot_lead_average.py deleted file mode 100644 index 9779403595..0000000000 --- a/ush/plotting_scripts/plot_lead_average.py +++ /dev/null @@ -1,657 +0,0 @@ -''' -Name: plot_lead_average.py -Contact(s): 
Mallory Row -Abstract: Reads average and CI files from plot_time_series.py to make dieoff plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') 
-obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) - - -if len(fcst_lead_list[0]) < 2: - logger.warning("Must provide more than one forecast lead to " - "plot lead average") - sys.exit(0) - 
-output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - 
valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. 
Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast 
lead averages " - +"for forecast variable "+fcst_var_name+" "+fcst_var_level+" " - +fcst_var_thresh) - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 
'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - CI_file_cols = ['LEADS', 'CI_VALS'] - CI_bar_max_widths = np.append( - np.diff(fcst_lead_timedeltas), - fcst_lead_timedeltas[-1]-fcst_lead_timedeltas[-2] - )/1.5 - CI_bar_min_widths = np.append( - np.diff(fcst_lead_timedeltas), - fcst_lead_timedeltas[-1]-fcst_lead_timedeltas[-2] - )/nmodels - CI_bar_intvl_widths = ( - (CI_bar_max_widths-CI_bar_min_widths)/nmodels - ) - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_leads)] - ) - model_avg_data.fill(np.nan) -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - logger.debug(f"FCST LEAD IS {fcst_lead}") - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - 
logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, fcst_lead_idx] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" does not exist") -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_CI_'+ci_method+'.txt' -# ) -# CI_file = os.path.join(output_base_dir, 'data', CI_filename) - CI_file = get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - model_CI_data = np.empty(len(fcst_leads)) - model_CI_data.fill(np.nan) - if ci_method != 'NONE': - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - diff_from_avg_data = model_avg_data[1,:] - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" 
with plot name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'].tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'].tolist() - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = ( - fcst_leads.index(fcst_lead) - ) - if fcst_lead in model_CI_file_data_leads: - model_CI_file_data_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals[fcst_lead_idx] - != '--'): - model_CI_data[fcst_lead_idx] = ( - float(model_CI_file_data_vals \ - [fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - else: - if model_num == 1: - diff_from_avg_data = model_avg_data[0,:] - else: - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with " - +"plot name " - +model_plot_name+" " - +"file: "+CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with " - +"plot name " - +model_plot_name+" " - +"file: "+CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = ( - fcst_leads.index(fcst_lead) - ) - if fcst_lead in model_CI_file_data_leads: - model_CI_file_data_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals \ - [fcst_lead_idx] - != '--'): - 
model_CI_data[fcst_lead_idx] = ( - float(model_CI_file_data_vals \ - [fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - if model_num == 1: - fig, (ax1, ax2) = plt.subplots(2,1,figsize=(10,12), - sharex=True) - ax1.grid(True) - ax1.tick_params(axis='x', pad=15) - ax1.set_xticks(fcst_lead_timedeltas) - ax1.set_xticklabels(fcst_lead_timedeltas_str) - ax1.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax1.tick_params(axis='y', pad=15) - ax1.set_ylabel(average_method.title(), labelpad=30) - ax2.grid(True) - ax2.tick_params(axis='x', pad=15) - ax2.set_xlabel('Forecast Lead', labelpad=30) - ax2.tick_params(axis='y', pad=15) - ax2.set_ylabel('Difference', labelpad=30) - boxstyle = matplotlib.patches.BoxStyle('Square', pad=0.25) - props = {'boxstyle': boxstyle, - 'facecolor': 'white', - 'linestyle': 'solid', - 'linewidth': 1, - 'edgecolor': 'black',} - ax2.text(0.7055, 1.05, 'Note: differences outside the ' - +'outline bars are significant\n at the 95% ' - +'confidence interval', ha='center', va='center', - fontsize=10, bbox=props, transform=ax2.transAxes) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.plot(fcst_lead_timedeltas, model_avg_data[1,:], - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs', - zorder=4) - ax2.plot(fcst_lead_timedeltas, - np.zeros_like(fcst_lead_timedeltas), - color='#888888', - ls='-', linewidth=2.0, - zorder=4) - ax2.plot(fcst_lead_timedeltas, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - else: - ax2.plot(fcst_lead_timedeltas, - np.zeros_like(fcst_lead_timedeltas), - color='black', - ls='-', linewidth=2.0, - zorder=4) - ax1.plot(fcst_lead_timedeltas, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - 
marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - else: - ax1.plot(fcst_lead_timedeltas, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - ax2.plot(fcst_lead_timedeltas, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - ax2.bar(fcst_lead_timedeltas, 2*np.absolute(model_CI_data), - bottom=-1*np.absolute(model_CI_data), - width=CI_bar_max_widths-(CI_bar_intvl_widths*model_idx), - color='None', edgecolor=colors[model_idx], linewidth=1.5) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels+1, fontsize='13', - mode='expand', borderaxespad=0.) - else: - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels, fontsize='13', - mode='expand', borderaxespad=0.) 
- savefig_imagename = stat+'_'+base_name+'.png' - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_by_date.py b/ush/plotting_scripts/plot_lead_by_date.py deleted file mode 100644 index 2c5b527290..0000000000 --- a/ush/plotting_scripts/plot_lead_by_date.py +++ /dev/null @@ -1,776 +0,0 @@ -''' -Name: plot_lead_by_date.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper run_all_times - to make lead-date plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = 
os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm 
-# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = 
valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. 
Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - 
fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast lead averages " - +"for forecast variable "+fcst_var_name+" " - +fcst_var_thresh) - # Set up base name for file naming convention for lead average files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_leadFCSTLEADHOLDER' - +'_fcst'+fcst_var_name+fcst_var_level - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': 
- base_name+='_alpha'+alpha - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - for fl in range(len(fcst_leads)): - fcst_lead = fcst_leads[fl] - # Set up expected date in MET .stat file - # and date plot information - plot_time_dates, expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead) - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from " - +"METplus conf file has length of 0. Not enough " - +"information was provided to build date " - +"information. Please check provided " - +"VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - model_lead_now_data_index = pd.MultiIndex.from_product( - [[model_plot_name], [fcst_lead], expected_stat_file_dates], - names=['model_plot_name', 'leads', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEADHOLDER', fcst_lead) -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, -# model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'obs_lead': obs_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = 
do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_lead_now_data = pd.DataFrame( - np.nan, index=model_lead_now_index, - columns=[ 'TOTAL' ] - ) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_lead_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_lead_now_stat_file_data.rename( - columns=dict(zip( - model_lead_now_stat_file_data.columns \ - [:len(stat_file_base_columns)], - stat_file_base_columns - )), inplace=True - ) - line_type = model_lead_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_lead_now_stat_file_data.rename( - columns=dict(zip( - model_lead_now_stat_file_data.columns \ - [len(stat_file_base_columns):], - stat_file_line_type_columns - )), inplace=True - ) - model_lead_now_stat_file_data_fcstvaliddates = ( - model_lead_now_stat_file_data.loc[:] \ - ['FCST_VALID_BEG'].values - ) - model_lead_now_data = ( - pd.DataFrame(np.nan, index=model_lead_now_data_index, - columns=stat_file_line_type_columns) - ) - model_lead_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_lead_now_stat_file_data \ - .loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - model_lead_now_stat_file_data \ - .loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in 
expected_stat_file_dates: - if expected_date in \ - model_lead_now_stat_file_data_fcstvaliddates: - matching_date_idx = ( - model_lead_now_stat_file_data_fcstvaliddates \ - .tolist().index(expected_date) - ) - model_lead_now_stat_file_data_indexed = ( - model_lead_now_stat_file_data \ - .loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_lead_now_data.loc[ - (model_plot_name, - fcst_lead, - expected_date) - ][col] = ( - model_lead_now_stat_file_data_indexed \ - .loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_lead_now_data = pd.DataFrame( - np.nan, index=model_lead_now_index, - columns=[ 'TOTAL' ] - ) - if fl > 0: - model_now_data = pd.concat( - [model_now_data, model_lead_now_data] - ) - else: - model_now_data = model_lead_now_data - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - # Build lead by date grid for plotting - ymesh, xmesh = np.meshgrid(plot_time_dates, fcst_lead_timedeltas) - # Calculate statistics and plots - if fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == 'True': - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0,0])): - for fl in range(len(fcst_leads)): - stat_values_array[l,:,fl,:] = ( - np.ma.mask_cols(stat_values_array[l,:,fl,:]) - ) - if (stat == 'fbar_obar' or stat == 
'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_stat_values_array = stat_values_array[1,0,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(date_type.title()+' Date', labelpad=20) - ax.set_ylim([plot_time_dates[0],plot_time_dates[-1]]) - ax.yaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.yaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.yaxis.set_minor_locator(md.DayLocator()) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_stat_values_array, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = 
model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = stat_values_array[0,model_idx,:,:] - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(date_type.title()+' Date', labelpad=20) - ax.set_ylim([plot_time_dates[0],plot_time_dates[-1]]) - ax.yaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.yaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.yaxis.set_minor_locator(md.DayLocator()) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name - +" - obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_stat_values_array - - stat_values_array[1,model_idx,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, 
C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_stat_values_array - ) - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_stat_values_array = model_stat_values_array - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_stat_values_array - model1_stat_values_array - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = 
ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEADHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_by_level.py b/ush/plotting_scripts/plot_lead_by_level.py deleted file mode 100644 index c26ee96c83..0000000000 --- a/ush/plotting_scripts/plot_lead_by_level.py +++ /dev/null @@ -1,707 +0,0 @@ -''' 
-Name: plot_lead_by_level.py -Contact(s): Mallory Row -Abstract: Reads average files from plot_time_series.py to make lead-pressue plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = 
os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) - -for level_list in fcst_var_level_list: - for level in level_list: - if not 
level.startswith('P'): - logger.warning(f"Forecast level value ({level}) expected " - "to be in pressure, i.e. P500. Exiting.") - sys.exit(0) - -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if 
valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. 
Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast lead averages" - +" for forecast 
variable "+fcst_var_name - +" "+fcst_var_thresh) - # Set up base name for file naming convention for lead average files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Build date by forecst level grid for plotting - fcst_var_levels_int = np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - xmesh, ymesh = 
np.meshgrid(fcst_lead_timedeltas, fcst_var_levels_int) - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Reading in model lead average files produced from - # plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_levels), - len(fcst_leads)] - ) - model_avg_data.fill(np.nan) - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.error("Model "+str(model_num)+" " - +model_name+" with plot name " - 
+model_plot_name+" file: " - +lead_avg_file+" empty") - sys.exit(1) - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index( - fcst_lead - ) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vl, - fcst_lead_idx] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.error("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" does not exist") - sys.exit(1) - - if model_num == 1: - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig 
= plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_avg_data = model_avg_data[1,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0], - fcst_var_levels_int[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_avg_data, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_avg_data, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0], - 
fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_avg_data[0,:,:] - - model_avg_data[1,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num) - +" "+model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_avg_data[0,:,:] - ) - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - 
linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_avg_data = model_avg_data[0,:,:] - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_avg_data[0,:,:] - model1_avg_data - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': 
- cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_stat_by_level.py b/ush/plotting_scripts/plot_stat_by_level.py deleted file mode 100644 index e96bb8e8af..0000000000 --- a/ush/plotting_scripts/plot_stat_by_level.py +++ /dev/null @@ -1,504 +0,0 @@ -''' -Name: plot_stat_by_level.py -Contact(s): Mallory Row -Abstract: Reads average forecast hour files from plot_time_series.py - to make stat-pressue plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in 
make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] 
-output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 
'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - 
time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for 
forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': '+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead - +" and forecast variable "+fcst_var_name - +" "+fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != 
'' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Build forecast levels for plotting - fcst_var_levels_int = 
np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - # Reading in model lead averages files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_levels)] - ) - model_avg_data.fill(np.nan) - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) \ -# .replace(fcst_lead, '_avgs') -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - 
model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vl] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") - if model_num == 1: - fig, ax = plt.subplots(1,1,figsize=(10,12)) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(stat_plot_name, labelpad=30) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level', labelpad=30) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax.plot(model_avg_data[1,:], fcst_var_levels_int, - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs ', - zorder=4) - ax.plot(model_avg_data[0,:], fcst_var_levels_int, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - ax.legend(bbox_to_anchor=(1.025, 1.0, 0.3, 0.0), loc='upper right', - ncol=1, fontsize='13', mode='expand', 
borderaxespad=0.) - ax.set_title(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+', '+fcst_lead_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_threshold_average.py b/ush/plotting_scripts/plot_threshold_average.py deleted file mode 100644 index 9f8714f5bc..0000000000 --- a/ush/plotting_scripts/plot_threshold_average.py +++ /dev/null @@ -1,649 +0,0 @@ -''' -Name: plot_threshold_average.py -Contact(s): Mallory Row -Abstract: Reads average and CI files from plot_time_series.py to make dieoff plots -History Log: First version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = 
os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = [os.environ['FCST_THRESH'].split(', ')] -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = [os.environ['OBS_THRESH'].split(', ')] -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 
'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} 
-valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting 
information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_threshs = plot_info[2] - obs_var_threshs = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_threshs) - ] - fcst_var_threshs_format = np.full_like( - fcst_var_threshs, np.nan, dtype=object - ) - fcst_var_threshs_float = np.full_like( - fcst_var_threshs, np.nan, dtype=float - ) - for fcst_var_thresh in fcst_var_threshs: - fcst_var_thresh_idx = fcst_var_threshs.index(fcst_var_thresh) - fcst_var_thresh_symbol, fcst_var_thresh_letter = ( - plot_util.format_thresh(fcst_var_thresh) - ) - fcst_var_threshs_format[fcst_var_thresh_idx] = fcst_var_thresh_letter - fcst_var_threshs_float[fcst_var_thresh_idx] = ( - fcst_var_thresh_letter[2:] - ) - obs_var_threshs_format = np.full_like( - obs_var_threshs, np.nan, dtype=object - ) - for obs_var_thresh in obs_var_threshs: - obs_var_thresh_idx = obs_var_threshs.index(obs_var_thresh) - obs_var_thresh_symbol, obs_var_thresh_letter = ( - plot_util.format_thresh(obs_var_thresh) - ) - obs_var_threshs_format[obs_var_thresh_idx] = obs_var_thresh_letter - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' 
'+interp_mthd - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - logger.info("Working on forecast threshold averages " - +"for forecast lead "+fcst_lead+" " - +"for forecast variable "+fcst_var_name+" "+fcst_var_level) - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - +'FCSTTHRESHHOLDER'+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +'OBSTHRESHHOLDER'+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - 
base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - CI_file_cols = ['LEADS', 'CI_VALS'] - CI_bar_max_widths = np.append( - np.diff(fcst_var_threshs_float), - fcst_var_threshs_float[-1]-fcst_var_threshs_float[-2] - )/1.5 - CI_bar_min_widths = np.append( - np.diff(fcst_var_threshs_float), - fcst_var_threshs_float[-1]-fcst_var_threshs_float[-2] - )/nmodels - CI_bar_intvl_widths = ( - (CI_bar_max_widths-CI_bar_min_widths)/nmodels - ) - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_threshs_format)] - ) - model_avg_data.fill(np.nan) - model_CI_data = np.empty(len(fcst_var_threshs_format)) - model_CI_data.fill(np.nan) - for vt in range(len(fcst_var_threshs_format)): - fcst_var_thresh_format = fcst_var_threshs_format[vt] - obs_var_thresh_format = obs_var_threshs_format[vt] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# .replace('OBSTHRESHHOLDER', -# 
str(obs_var_thresh_format)) \ -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - model_fcst_lead_idx = ( - model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vt] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with 
plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# .replace('OBSTHRESHHOLDER', -# str(obs_var_thresh_format)) \ -# +'_CI_'+ci_method+'.txt' -# ) -# CI_file = os.path.join(output_base_dir, 'data', CI_filename) - CI_file = get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - if ci_method != 'NONE': - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - diff_from_avg_data = model_avg_data[1,:] - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - model_fcst_lead_idx = ( - model_CI_file_data_leads.index(fcst_lead) - ) - if (model_CI_file_data_vals \ - [model_fcst_lead_idx] - != '--'): - model_CI_data[vt] = ( - float(model_CI_file_data_vals \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - else: - if model_num == 1: - diff_from_avg_data = model_avg_data[0,:] - else: - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot " - +"name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - 
+model_name+" with plot " - +"name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - model_fcst_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals \ - [model_fcst_lead_idx] - != '--'): - model_CI_data[vt] = ( - float(model_CI_file_data_vals \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - if model_num == 1: - fig, (ax1, ax2) = plt.subplots(2,1,figsize=(10,12), - sharex=True) - ax1.grid(True) - ax1.tick_params(axis='x', pad=15) - ax1.set_xticks(fcst_var_threshs_float) - ax1.set_xticklabels(fcst_var_threshs_format) - ax1.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax1.tick_params(axis='y', pad=15) - ax1.set_ylabel(average_method.title(), labelpad=30) - ax2.grid(True) - ax2.tick_params(axis='x', pad=15) - ax2.set_xlabel('Forecast Threshold', labelpad=30) - ax2.tick_params(axis='y', pad=15) - ax2.set_ylabel('Difference', labelpad=30) - boxstyle = matplotlib.patches.BoxStyle('Square', pad=0.25) - props = {'boxstyle': boxstyle, - 'facecolor': 'white', - 'linestyle': 'solid', - 'linewidth': 1, - 'edgecolor': 'black',} - ax2.text(0.7055, 1.05, 'Note: differences outside the ' - +'outline bars are significant\n at the 95% ' - +'confidence interval', ha='center', va='center', - fontsize=10, bbox=props, transform=ax2.transAxes) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.plot(fcst_var_threshs_float, model_avg_data[1,:], - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs', - zorder=4) - ax2.plot(fcst_var_threshs_float, - 
np.zeros_like(fcst_var_threshs_float), - color='#888888', - ls='-', linewidth=2.0, - zorder=4) - ax2.plot(fcst_var_threshs_float, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - else: - ax2.plot(fcst_var_threshs_float, - np.zeros_like(fcst_var_threshs_float), - color='black', - ls='-', linewidth=2.0, - zorder=4) - ax1.plot(fcst_var_threshs_float, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - else: - ax1.plot(fcst_var_threshs_float, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - ax2.plot(fcst_var_threshs_float, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - ax2.bar(fcst_var_threshs_float, 2*np.absolute(model_CI_data), - bottom=-1*np.absolute(model_CI_data), - width=CI_bar_max_widths-(CI_bar_intvl_widths*model_idx), - color='None', edgecolor=colors[model_idx], linewidth=1.5) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels+1, fontsize='13', - mode='expand', borderaxespad=0.) - else: - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels, fontsize='13', - mode='expand', borderaxespad=0.) 
- savefig_imagename = (stat+'_' - +base_name.replace('FCSTTHRESHHOLDER', - 'all') \ - .replace('OBSTHRESHHOLDER', - 'all') \ - .replace('_avgs', fcst_lead) - +'.png') - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_threshold_by_lead.py b/ush/plotting_scripts/plot_threshold_by_lead.py deleted file mode 100644 index 2511db6de7..0000000000 --- a/ush/plotting_scripts/plot_threshold_by_lead.py +++ /dev/null @@ -1,700 +0,0 @@ -''' -Name: plot_threshold_by_lead.py -Contact(s): Mallory Row -Abstract: Reads average from plot_time_series.py to make treshold-lead plots -History Log: First version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = 
os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = [os.environ['FCST_THRESH'].split(', ')] -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = [os.environ['OBS_THRESH'].split(', ')] -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] 
= 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - 
valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - 
extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_threshs = plot_info[2] - obs_var_threshs = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_threshs) - ] - fcst_var_threshs_format = np.full_like( - fcst_var_threshs, np.nan, dtype=object - ) - fcst_var_threshs_float = np.full_like( - fcst_var_threshs, np.nan, dtype=float - ) - for fcst_var_thresh in fcst_var_threshs: - fcst_var_thresh_idx = fcst_var_threshs.index(fcst_var_thresh) - fcst_var_thresh_symbol, fcst_var_thresh_letter = ( - plot_util.format_thresh(fcst_var_thresh) - ) - fcst_var_threshs_format[fcst_var_thresh_idx] = fcst_var_thresh_letter - fcst_var_threshs_float[fcst_var_thresh_idx] = ( - fcst_var_thresh_letter[2:] - ) - xmesh, ymesh = np.meshgrid(fcst_var_threshs_float, fcst_lead_timedeltas) - obs_var_threshs_format = 
np.full_like( - obs_var_threshs, np.nan, dtype=object - ) - for obs_var_thresh in obs_var_threshs: - obs_var_thresh_idx = obs_var_threshs.index(obs_var_thresh) - obs_var_thresh_symbol, obs_var_thresh_letter = ( - plot_util.format_thresh(obs_var_thresh) - ) - obs_var_threshs_format[obs_var_thresh_idx] = obs_var_thresh_letter - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - 
+'FCSTTHRESHHOLDER'+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +'OBSTHRESHHOLDER'+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_leads), - len(fcst_var_threshs_format)] - ) - model_avg_data.fill(np.nan) - for vt in range(len(fcst_var_threshs_format)): - fcst_var_thresh_format = fcst_var_threshs_format[vt] - obs_var_thresh_format = obs_var_threshs_format[vt] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# .replace('OBSTHRESHHOLDER', -# str(obs_var_thresh_format)) \ -# +'.txt' -# ) - logger.info("Working on forecast lead averages " - +"for forecast variable "+fcst_var_name+" " - +fcst_var_level+" "+fcst_var_thresh_format) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - 
model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index( - fcst_lead - ) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, - fcst_lead_idx, vt] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" 
"+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") - if model_num == 1: - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_avg_data = model_avg_data[1,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Threshold', labelpad=20) - ax.set_xticks(fcst_var_threshs_float) - ax.set_xticklabels(fcst_var_threshs_format) - ax.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Forecast Lead', labelpad=20) - ax.set_yticks(fcst_lead_timedeltas) - ax.set_yticklabels(fcst_lead_timedeltas_str) - ax.set_ylim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_avg_data, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_avg_data, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, CF1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 
'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Threshold', labelpad=20) - ax.set_xticks(fcst_var_threshs_float) - ax.set_xticklabels(fcst_var_threshs_format) - ax.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Forecast Lead', labelpad=20) - ax.set_yticks(fcst_lead_timedeltas) - ax.set_yticklabels(fcst_lead_timedeltas_str) - ax.set_ylim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_avg_data[0,:,:] - - model_avg_data[1,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num) - +" "+model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_avg_data[0,:,:] - ) - CF1 = ax.contourf(xmesh, ymesh, 
model_avg_data[0,:,:], - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_avg_data = model_avg_data[0,:,:] - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_avg_data[0,:,:] - model1_avg_data - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - 
levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTTHRESHHOLDER', 'all') \ - .replace('OBSTHRESHHOLDER','all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_time_series.py b/ush/plotting_scripts/plot_time_series.py deleted file mode 100644 index cb90cc184a..0000000000 --- a/ush/plotting_scripts/plot_time_series.py +++ /dev/null @@ -1,775 +0,0 @@ -''' -Name: plot_time_series.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper - run_all_times to make time series plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np 
-import pandas as pd -import itertools -import warnings -import logging -import datetime -import math -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] 
-stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = 
os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - 
obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) -# Significance testing info -# need to set up random number array [nmodels, ntests, ndays] -# for EMC Monte Carlo testing. Each model has its own -# "series" of random numbers used at all forecast hours -# and thresholds. 
-mc_dates, mc_expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - '000000' -) -ndays = len(mc_expected_stat_file_dates) -ntests = 10000 -randx = np.random.rand(nmodels,ntests,ndays) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = 
valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': '+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead+" " - +"and forecast variable "+fcst_var_name+" "+fcst_var_level+" " - +fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - 
and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+fcst_var_level - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Set up expected date in MET .stat file and date plot information - plot_time_dates, expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from METplus " - +"conf file has length of 0. Not enough information " - +"was provided to build date information. 
Please check " - +"provided VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_data_now_index = pd.MultiIndex.from_product( - [[model_plot_name], expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_now_data = pd.DataFrame(np.nan, - index=model_data_now_index, - columns=[ 'TOTAL' ]) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_now_stat_file_data.rename( - 
columns=dict(zip( - model_now_stat_file_data.columns[:nbase_columns], - stat_file_base_columns - )), inplace=True - ) - line_type = model_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_now_stat_file_data.rename( - columns=dict(zip( - model_now_stat_file_data.columns[nbase_columns:], - stat_file_line_type_columns - )), inplace=True - ) - model_now_stat_file_data_fcst_valid_dates = ( - model_now_stat_file_data.loc[:]['FCST_VALID_BEG'].values - ) - model_now_data = ( - pd.DataFrame(np.nan, index=model_data_now_index, - columns=stat_file_line_type_columns) - ) - model_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_now_stat_file_data.loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - model_now_stat_file_data.loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in expected_stat_file_dates: - if expected_date in \ - model_now_stat_file_data_fcst_valid_dates: - matching_date_idx = ( - model_now_stat_file_data_fcst_valid_dates \ - .tolist().index(expected_date) - ) - model_now_stat_file_data_indexed = ( - model_now_stat_file_data.loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_now_data.loc[ - (model_plot_name, expected_date) - ][col] = ( - model_now_stat_file_data_indexed.loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_now_data = pd.DataFrame(np.nan, - index=model_data_now_index, - columns=[ 'TOTAL' ]) - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - if 
fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - # Calculate statistics and plots - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == 'True': - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0])): - stat_values_array[l,:,:] = ( - np.ma.mask_cols(stat_values_array[l,:,:]) - ) - np.ma.set_fill_value(stat_values_array, np.nan) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = stat_values_array[:,model_idx,:] - -# lead_avg_filename = stat + '_' + os.path.basename(model_info[3]) - - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename -# if 'fcst_lead' + fcst_lead in model_info[3]: -# lead_avg_filename.replace('fcst_lead' + fcst_lead, 'fcst_lead_avgs') -# lead_avg_filename += '.txt' - - # if not, remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename -# else: -# lead_avg_filename.replace('fcst_lead' + fcst_lead, '') -# lead_avg_filename += '_fcst_lead_avgs.txt' - -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - - - # Write model forecast lead average to file - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': 
fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - logger.debug("Writing model "+str(model_num)+" "+model_name+" " - +"with name on plot "+model_plot_name+" lead " - +fcst_lead+" average to file: "+lead_avg_file) - model_stat_average_array = plot_util.calculate_average( - logger, average_method, stat, model_data.loc[[model_plot_name]], - model_stat_values_array - ) - with open(lead_avg_file, 'a') as file2write: - file2write.write(fcst_lead) - if fcst_var_units_plot_title != '': - file2write.write(' '+fcst_var_units_plot_title) - else: - file2write.write(' [NA]') - if obs_var_units_plot_title != '': - file2write.write(' '+obs_var_units_plot_title) - else: - file2write.write(' [NA]') - for l in range(len(model_stat_average_array)): - file2write.write( - ' '+str(model_stat_average_array[l]) - ) - file2write.write('\n') - # Write confidence intervals to file, if requested, - # using similar naming to model forecast lead average - if ci_method != 'NONE': -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_CI_'+ci_method+'.txt' -# ).replace('fcst_lead'+fcst_lead, 'fcst_lead_avgs') -# CI_filename = stat + '_' + os.path.basename(model_info[3]) - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename -# if 'fcst_lead' + fcst_lead in model_info[3]: -# CI_filename.replace('fcst_lead' + fcst_lead, 'fcst_lead_avgs') -# CI_filename += '.txt' - - # if not, remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename -# else: -# CI_filename.replace('fcst_lead' + fcst_lead, '') -# CI_filename += '_fcst_lead_avgs' - -# CI_filename += '_CI_' + ci_method + '.txt' - -# CI_file = os.path.join(output_base_dir, 'data', -# CI_filename) - - CI_file = 
get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Writing "+ci_method+" confidence intervals " - +"for difference between model " - +str(model_num)+" "+model_name+" with name " - +"on plot "+model_plot_name+" and the " - +"observations at lead "+fcst_lead+" to " - +"file: "+CI_file) - if ci_method == 'EMC_MONTE_CARLO': - logger.warning("Monte Carlo resampling not " - +"done for fbar_obar, orate_frate, " - +"or baser_frate.") - stat_CI = '--' - else: - stat_CI = plot_util.calculate_ci( - logger, ci_method, model_stat_values_array[0,:], - model_stat_values_array[1,:],total_dates, - stat, average_method, randx[model_idx,:,:] - ) - with open(CI_file, 'a') as file2write: - file2write.write(fcst_lead+' '+str(stat_CI)+'\n') - else: - if model_num == 1: - model1_stat_values_array = ( - model_stat_values_array[0,:] - ) - model1_plot_name = model_plot_name - model1_name = model_name - else: - logger.debug("Writing "+ci_method+" confidence " - +"intervals for difference between " - +"model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name+" and model 1 " - +model1_name+" with name on plot " - +model1_plot_name+" at lead " - +fcst_lead+" to file: "+CI_file) - if ci_method == 'EMC_MONTE_CARLO': - stat_CI = plot_util.calculate_ci( - logger, ci_method, - model_data.loc[[model_plot_name]], - model_data.loc[[model1_plot_name]], total_dates, - stat, average_method, randx[model_idx,:,:] - ) - else: - stat_CI = plot_util.calculate_ci( - logger, ci_method, model_stat_values_array, - model1_stat_values_array, total_dates, - stat, average_method, randx[model_idx,:,:] - ) - with open(CI_file, 'a') as file2write: - file2write.write(fcst_lead+' '+str(stat_CI)+'\n') - logger.debug("Plotting model "+str(model_num)+" "+model_name+" " - +"with name on plot "+model_plot_name) - if model_num == 1: - fig, ax = plt.subplots(1,1,figsize=(10,6)) 
- ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=30) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(stat_plot_name, labelpad=30) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - obs_stat_values_array = model_stat_values_array[1,:] - obs_count = ( - len(obs_stat_values_array) - - np.ma.count_masked(obs_stat_values_array) - ) - plot_time_dates_m = np.ma.masked_where( - np.ma.getmask(obs_stat_values_array), plot_time_dates - ) - plot_time_dates_mc = np.ma.compressed(plot_time_dates_m) - obs_stat_values_mc = np.ma.compressed( - obs_stat_values_array - ) - if np.ma.is_masked(model_stat_average_array[1]): - obs_legend_label = ( - 'obs ' - +str(model_stat_average_array[1])+' ' - +str(obs_count) - ) - else: - obs_legend_label = ( - 'obs ' - +str(round(model_stat_average_array[1],3))+' ' - +str(obs_count) - ) - ax.plot_date(plot_time_dates_mc, - obs_stat_values_mc, - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=obs_legend_label, - zorder=4) - count = ( - len(model_stat_values_array[0,:]) - - np.ma.count_masked(model_stat_values_array[0,:]) - ) - plot_time_dates_m = np.ma.masked_where( - np.ma.getmask(model_stat_values_array[0,:]), plot_time_dates - ) - plot_time_dates_mc = np.ma.compressed(plot_time_dates_m) - model_stat_values_mc = np.ma.compressed( - model_stat_values_array[0,:] - ) - if np.ma.is_masked(model_stat_average_array[0]): - model_legend_label = ( - model_plot_name+' ' - +str(model_stat_average_array[0])+' ' - +str(count) - ) - else: - model_legend_label = ( - model_plot_name+' ' - +str(round(model_stat_average_array[0],3))+' ' - +str(count) - ) - ax.plot_date(plot_time_dates_mc, model_stat_values_mc, - 
def get_date_arrays(date_type, date_beg, date_end,
                    fcst_valid_hour, fcst_init_hour,
                    obs_valid_hour, obs_init_hour,
                    lead):
    """! Create arrays of requested dates for plotting and the dates
         expected to be found in the MET .stat files.

         Args:
             date_type - string describing the treatment of dates,
                         either VALID or INIT
             date_beg - string of beginning date,
                        either blank or %Y%m%d format
             date_end - string of end date,
                        either blank or %Y%m%d format
             fcst_valid_hour - string of forecast valid hour(s),
                               blank or %H%M%S (', ' separated)
             fcst_init_hour - string of forecast init hour(s),
                              blank or %H%M%S (', ' separated)
             obs_valid_hour - string of observation valid hour(s),
                              blank or %H%M%S (', ' separated)
             obs_init_hour - string of observation init hour(s),
                             blank or %H%M%S (', ' separated)
             lead - string of forecast lead, in %H%M%S format

         Returns:
             plot_time_dates - list of ordinal dates (whole days plus
                               fraction of a day) based on user
                               provided information
             expected_stat_file_dates - list of dates expected to be
                                        found in the MET .stat files,
                                        formatted as %Y%m%d_%H%M%S
    """
    # Lead broken into seconds; the hour part is wrapped at 24 here so
    # only the time-of-day contribution is used when deriving the
    # missing valid/init hours below
    lead_hr_sec = int(int(lead[:-4]) % 24) * 3600
    lead_min_sec = int(lead[-4:-2]) * 60
    lead_sec_only = int(lead[-2:])

    def _hours(hour_string):
        # Split a ', ' separated %H%M%S string, dropping empty entries
        return [h for h in hour_string.split(', ') if h]

    def _clock(total_seconds):
        # Render a number of seconds as an HHMMSS string
        hr = int(total_seconds // 3600)
        mn = int((total_seconds % 3600) // 60)
        sc = int((total_seconds % 3600) % 60)
        return str(hr).zfill(2) + str(mn).zfill(2) + str(sc).zfill(2)

    time_info = {
        'fcst_valid_time': _hours(fcst_valid_hour),
        'fcst_init_time': _hours(fcst_init_hour),
        'obs_valid_time': _hours(obs_valid_hour),
        'obs_init_time': _hours(obs_init_hour),
    }
    # Derive valid hours from init hours (and vice versa) when only
    # one of the two was provided
    for prefix in ('fcst', 'obs'):
        valid_times = time_info[prefix+'_valid_time']
        init_times = time_info[prefix+'_init_time']
        if not valid_times and init_times:
            for itime in init_times:
                offset = datetime.timedelta(
                    seconds=(lead_hr_sec + lead_min_sec + lead_sec_only
                             + int(int(itime[0:2]) % 24) * 3600
                             + int(itime[2:4]) * 60
                             + int(itime[4:]))
                )
                valid_times.append(_clock(offset.total_seconds()))
        if not init_times and valid_times:
            for vtime in valid_times:
                offset = datetime.timedelta(
                    seconds=(lead_hr_sec + lead_min_sec + lead_sec_only
                             - int(int(vtime[0:2]) % 24) * 3600
                             - int(vtime[2:4]) * 60
                             - int(vtime[4:]))
                )
                init_times.append(_clock(offset.total_seconds()))
    # Mirror fcst hours to obs (and vice versa) when one side is empty
    for kind in ('valid', 'init'):
        fcst_times = time_info['fcst_'+kind+'_time']
        obs_times = time_info['obs_'+kind+'_time']
        if not fcst_times and obs_times:
            time_info['fcst_'+kind+'_time'] = obs_times
        if not obs_times and fcst_times:
            time_info['obs_'+kind+'_time'] = fcst_times
    # Build the date arrays for the driving date type
    date_info = {}
    for key in ('fcst_'+date_type.lower(), 'obs_'+date_type.lower()):
        hour_list = time_info[key+'_time']
        if hour_list:
            time_beg = min(hour_list)
            time_end = max(hour_list)
            if time_beg == time_end or len(hour_list) == 1:
                delta_t = datetime.timedelta(seconds=86400)
            else:
                # Spacing between consecutive hours; the final entry
                # wraps around to the end of the day
                deltas = []
                for idx, hour in enumerate(hour_list):
                    if hour == time_end:
                        deltas.append(
                            datetime.datetime.strptime('235959', '%H%M%S')
                            - datetime.datetime.strptime(hour, '%H%M%S')
                            + datetime.timedelta(seconds=1)
                        )
                    else:
                        deltas.append(
                            datetime.datetime.strptime(hour_list[idx+1],
                                                       '%H%M%S')
                            - datetime.datetime.strptime(hour, '%H%M%S')
                        )
                delta_arr = np.array(deltas)
                if np.all(delta_arr == delta_arr[0]):
                    delta_t = delta_arr[0]
                else:
                    delta_t = np.min(delta_arr)
            beg = datetime.datetime.strptime(date_beg+time_beg,
                                             '%Y%m%d%H%M%S')
            end = datetime.datetime.strptime(date_end+time_end,
                                             '%Y%m%d%H%M%S')
            dates = np.arange(beg, end+delta_t,
                              delta_t).astype(datetime.datetime)
        else:
            dates = []
        date_info[key+'_dates'] = dates
    # Build the opposite date arrays by shifting with the full lead
    # (hours NOT wrapped at 24 here, matching the original behavior)
    if date_type == 'VALID':
        oppo_date_type = 'INIT'
    elif date_type == 'INIT':
        oppo_date_type = 'VALID'
    lead_shift = datetime.timedelta(
        seconds=(int(lead[:-4]) * 3600 + lead_min_sec + lead_sec_only)
    )
    if oppo_date_type == 'INIT':
        lead_shift = -1 * lead_shift
    for prefix in ('fcst', 'obs'):
        date_info[prefix+'_'+oppo_date_type.lower()+'_dates'] = (
            date_info[prefix+'_'+date_type.lower()+'_dates'] + lead_shift
        )
    # Assume fcst and obs date arrays agree (true for most cases) and
    # use the forecast arrays from here on
    dates = date_info['fcst_'+date_type.lower()+'_dates']
    plot_time_dates = []
    for date in dates:
        clock = date.time()
        day_sec = (clock.hour * 60 + clock.minute) * 60 + clock.second
        plot_time_dates.append(date.toordinal() + day_sec/86400.)
    # MET .stat files record valid dates in the file
    expected_stat_file_dates = [
        fv_date.strftime('%Y%m%d_%H%M%S')
        for fv_date in date_info['fcst_valid_dates']
    ]
    return plot_time_dates, expected_stat_file_dates
Format thresholds for file naming - - Args: - thresh - string of the treshold(s) - - Return: - thresh_symbol - string of the threshold(s) - with symbols - thresh_letters - string of the threshold(s) - with letters - """ - thresh_list = thresh.split(' ') - thresh_symbol = '' - thresh_letter = '' - for thresh in thresh_list: - if thresh == '': - continue - thresh_value = thresh - for opt in ['>=', '>', '==','!=','<=', '<', - 'ge', 'gt', 'eq', 'ne', 'le', 'lt']: - if opt in thresh_value: - thresh_opt = opt - thresh_value = thresh_value.replace(opt, '') - if thresh_opt in ['>', 'gt']: - thresh_symbol+='>'+thresh_value - thresh_letter+='gt'+thresh_value - elif thresh_opt in ['>=', 'ge']: - thresh_symbol+='>='+thresh_value - thresh_letter+='ge'+thresh_value - elif thresh_opt in ['<', 'lt']: - thresh_symbol+='<'+thresh_value - thresh_letter+='lt'+thresh_value - elif thresh_opt in ['<=', 'le']: - thresh_symbol+='<='+thresh_value - thresh_letter+='le'+thresh_value - elif thresh_opt in ['==', 'eq']: - thresh_symbol+='=='+thresh_value - thresh_letter+='eq'+thresh_value - elif thresh_opt in ['!=', 'ne']: - thresh_symbol+='!='+thresh_value - thresh_letter+='ne'+thresh_value - return thresh_symbol, thresh_letter - -def get_stat_file_base_columns(met_version): - """! 
Get the standard MET .stat file columns based on - version number - - Args: - met_version - string of MET version - number being used to - run stat_analysis - - Returns: - stat_file_base_columns - list of the standard - columns shared among the - different line types - """ - met_version = float(met_version) - if met_version < 8.1: - stat_file_base_columns = [ - 'VERSION', 'MODEL', 'DESC', 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_LEV', 'OBS_VAR', 'OBS_LEV', 'OBTYPE', 'VX_MASK', - 'INTERP_MTHD', 'INTERP_PNTS', 'FCST_THRESH', 'OBS_THRESH', - 'COV_THRESH', 'ALPHA', 'LINE_TYPE' - ] - else: - stat_file_base_columns = [ - 'VERSION', 'MODEL', 'DESC', 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_UNITS', 'FCST_LEV', 'OBS_VAR', 'OBS_UNITS', - 'OBS_LEV', 'OBTYPE', 'VX_MASK', 'INTERP_MTHD', 'INTERP_PNTS', - 'FCST_THRESH', 'OBS_THRESH', 'COV_THRESH', 'ALPHA', 'LINE_TYPE' - ] - return stat_file_base_columns - -def get_stat_file_line_type_columns(logger, met_version, line_type): - """! 
Get the MET .stat file columns for line type based on - version number - - Args: - met_version - string of MET version number - being used to run stat_analysis - line_type - string of the line type of the MET - .stat file being read - - Returns: - stat_file_line_type_columns - list of the line - type columns - """ - met_version = float(met_version) - if line_type == 'SL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', 'FFBAR', 'OOBAR', 'MAE' - ] - elif line_type == 'SAL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FABAR', 'OABAR', 'FOABAR', 'FFABAR', 'OOABAR', 'MAE' - ] - elif line_type == 'VL1L2': - if met_version <= 6.1: - stat_file_line_type_columns = [ - 'TOTAL', 'UFBAR', 'VFBAR', 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR' - ] - elif met_version >= 7.0: - stat_file_line_type_columns = [ - 'TOTAL', 'UFBAR', 'VFBAR', 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR', 'F_SPEED_BAR', 'O_SPEED_BAR' - ] - elif line_type == 'VAL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'UFABAR', 'VFABAR', 'UOABAR', 'VOABAR', 'UVFOABAR', - 'UVFFABAR', 'UVOOABAR' - ] - elif line_type == 'VCNT': - if met_version >= 7.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FBAR', 'FBAR_NCL', 'FBAR_NCU', 'OBAR', 'OBAR_NCL', - 'OBAR_NCU', 'FS_RMS', 'FS_RMS_NCL', 'FS_RMS_NCU', 'OS_RMS', - 'OS_RMS_NCL', 'OS_RMS_NCU', 'MSVE', 'MSVE_NCL', 'MSVE_NCU', - 'RMSVE', 'RMSVE_NCL', 'RMSVE_NCU', 'FSTDEV', 'FSTDEV_NCL', - 'FSTDEV_NCU', 'OSTDEV', 'OSTDEV_NCL', 'OSTDEV_NCU', 'FDIR', - 'FDIR_NCL', 'FDIR_NCU', 'ODIR', 'ODIR_NCL', 'ODIR_NCU', - 'FBAR_SPEED', 'FBAR_SPEED_NCL', 'FBAR_SPEED_NCU', 'OBAR_SPEED', - 'OBAR_SPEED_NCL', 'OBAR_SPEED_NCU', 'VDIFF_SPEED', - 'VDIFF_SPEED_NCL', 'VDIFF_SPEED_NCU', 'VDIFF_DIR', - 'VDIFF_DIR_NCL', 'VDIFF_DIR_NCU', 'SPEED_ERR', 'SPEED_ERR_NCL', - 'SPEED_ERR_NCU', 'SPEED_ABSERR', 'SPEED_ABSERR_NCL', - 'SPEED_ABSERR_NCU', 'DIR_ERR', 'DIR_ERR_NCL', 'DIR_ERR_NCU', 
- 'DIR_ABSERR', 'DIR_ABSERR_NCL', 'DIR_ABSERR_NCU' - ] - else: - logger.error("VCNT is not a valid LINE_TYPE in METV"+met_version) - exit(1) - elif line_type == 'CTC': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FY_OY', 'FY_ON', 'FN_OY', 'FN_ON' - ] - return stat_file_line_type_columns - -def get_clevels(data): - """! Get contour levels for plotting - - Args: - data - array of data to be contoured - - Returns: - clevels - array of contoure levels - """ - if np.abs(np.nanmin(data)) > np.nanmax(data): - cmax = np.abs(np.nanmin(data)) - cmin = np.nanmin(data) - else: - cmax = np.nanmax(data) - cmin = -1 * np.nanmax(data) - if cmax > 1: - cmin = round(cmin-1,0) - cmax = round(cmax+1,0) - else: - cmin = round(cmin-0.1,1) - cmax = round(cmax+0.1,1) - clevels = np.linspace(cmin, cmax, 11, endpoint=True) - return clevels - -def calculate_average(logger, average_method, stat, model_dataframe, - model_stat_values): - """! Calculate average of dataset - - Args: - logger - logging file - average_method - string of the method to - use to calculate the - average - stat - string of the statistic the - average is being taken for - model_dataframe - dataframe of model .stat - columns - model_stat_values - array of statistic values - - Returns: - average_array - array of average value(s) - """ - average_array = np.empty_like(model_stat_values[:,0]) - if average_method == 'MEAN': - for l in range(len(model_stat_values[:,0])): - average_array[l] = np.ma.mean(model_stat_values[l,:]) - elif average_method == 'MEDIAN': - for l in range(len(model_stat_values[:,0])): - logger.info(np.ma.median(model_stat_values[l,:])) - average_array[l] = np.ma.median(model_stat_values[l,:]) - elif average_method == 'AGGREGATION': - ndays = model_dataframe.shape[0] - model_dataframe_aggsum = ( - model_dataframe.groupby('model_plot_name').agg(['sum']) - ) - model_dataframe_aggsum.columns = ( - model_dataframe_aggsum.columns.droplevel(1) - ) - avg_values, avg_array, stat_plot_name 
= ( - calculate_stat(logger, model_dataframe_aggsum/ndays, stat) - ) - for l in range(len(avg_array[:,0])): - average_array[l] = avg_array[l] - else: - logger.error("Invalid entry for MEAN_METHOD, " - +"use MEAN, MEDIAN, or AGGREGATION") - exit(1) - return average_array - -def calculate_ci(logger, ci_method, modelB_values, modelA_values, total_days, - stat, average_method, randx): - """! Calculate confidence intervals between two sets of data - - Args: - logger - logging file - ci_method - string of the method to use to - calculate the confidence intervals - modelB_values - array of values - modelA_values - array of values - total_days - float of total number of days - being considered, sample size - stat - string of the statistic the - confidence intervals are being - calculated for - average_method - string of the method to - use to calculate the - average - randx - 2D array of random numbers [0,1) - - Returns: - intvl - float of the confidence interval - """ - if ci_method == 'EMC': - modelB_modelA_diff = modelB_values - modelA_values - ndays = total_days - np.ma.count_masked(modelB_modelA_diff) - modelB_modelA_diff_mean = modelB_modelA_diff.mean() - modelB_modelA_std = np.sqrt( - ((modelB_modelA_diff - modelB_modelA_diff_mean)**2).mean() - ) - if ndays >= 80: - intvl = 1.960*modelB_modelA_std/np.sqrt(ndays-1) - elif ndays >= 40 and ndays < 80: - intvl = 2.000*modelB_modelA_std/np.sqrt(ndays-1) - elif ndays >= 20 and ndays < 40: - intvl = 2.042*modelB_modelA_std/np.sqrt(ndays-1) - elif ndays < 20: - intvl = 2.228*modelB_modelA_std/np.sqrt(ndays-1) - elif ci_method == 'EMC_MONTE_CARLO': - ntest, ntests = 1, 10000 - scores_rand1 = np.empty(ntests) - scores_rand2 = np.empty(ntests) - scores_diff = np.empty(ntests) - while ntest <= ntests: - rand1_data = pd.DataFrame( - np.nan, index=modelB_values.index, - columns=modelB_values.columns - ) - replace_level= rand1_data.index.get_level_values(0)[0] - rand1_data.rename(index={replace_level: 'rand1'}, inplace=True) - 
rand2_data = pd.DataFrame( - np.nan, index=modelB_values.index, - columns=modelB_values.columns - ) - replace_level= rand2_data.index.get_level_values(0)[0] - rand2_data.rename(index={replace_level: 'rand2'}, inplace=True) - nday, ndays = 1, total_days - while nday <= ndays: - if randx[ntest-1,nday-1] - 0.5 >= 0: - rand1_data.iloc[nday-1,:] = modelA_values.iloc[nday-1,:] - rand2_data.iloc[nday-1,:] = modelB_values.iloc[nday-1,:] - else: - rand1_data.iloc[nday-1,:] = modelB_values.iloc[nday-1,:] - rand2_data.iloc[nday-1,:] = modelA_values.iloc[nday-1,:] - nday+=1 - rand1_stat_values, rand1_stat_values_array, stat_plot_name = ( - calculate_stat(logger, rand1_data, stat) - ) - rand2_stat_values, rand2_stat_values_array, stat_plot_name = ( - calculate_stat(logger, rand2_data, stat) - ) - rand1_average_array = calculate_average(logger, average_method, - stat, rand1_data, - rand1_stat_values_array[:,0,:]) - scores_rand1[ntest-1] = rand1_average_array[0] - rand2_average_array = calculate_average(logger, average_method, - stat, rand2_data, - rand2_stat_values_array[:,0,:]) - scores_rand2[ntest-1] = rand2_average_array[0] - scores_diff[ntest-1] = ( - rand2_average_array[0] - rand1_average_array[0] - ) - ntest+=1 - scores_diff_mean = np.sum(scores_diff)/ntests - scores_diff_var = np.sum((scores_diff-scores_diff_mean)**2) - scores_diff_std = np.sqrt(scores_diff_var/(ntests-1)) - intvl = 1.96*scores_diff_std - else: - logger.error("Invalid entry for MAKE_CI_METHOD, " - +"use EMC, EMC_MONTE_CARLO") - exit(1) - return intvl - -def get_stat_plot_name(logger, stat): - """! 
Get the formalized name of the statistic being plotted - - Args: - stat - string of the simple statistic - name being plotted - - Returns: - stat_plot_name - string of the formal statistic - name being plotted - """ - if stat == 'bias': - stat_plot_name = 'Bias' - elif stat == 'rmse': - stat_plot_name = 'Root Mean Square Error' - elif stat == 'msess': - stat_plot_name = "Murphy's Mean Square Error Skill Score" - elif stat == 'rsd': - stat_plot_name = 'Ratio of Standard Deviation' - elif stat == 'rmse_md': - stat_plot_name = 'Root Mean Square Error from Mean Error' - elif stat == 'rmse_pv': - stat_plot_name = 'Root Mean Square Error from Pattern Variation' - elif stat == 'pcor': - stat_plot_name = 'Pattern Correlation' - elif stat == 'acc': - stat_plot_name = 'Anomaly Correlation Coefficient' - elif stat == 'fbar': - stat_plot_name = 'Forecast Averages' - elif stat == 'fbar_obar': - stat_plot_name = 'Forecast and Observation Averages' - elif stat == 'speed_err': - stat_plot_name = ( - 'Difference in Average FCST and OBS Wind Vector Speeds' - ) - elif stat == 'dir_err': - stat_plot_name = ( - 'Difference in Average FCST and OBS Wind Vector Direction' - ) - elif stat == 'rmsve': - stat_plot_name = 'Root Mean Square Difference Vector Error' - elif stat == 'vdiff_speed': - stat_plot_name = 'Difference Vector Speed' - elif stat == 'vdiff_dir': - stat_plot_name = 'Difference Vector Direction' - elif stat == 'fbar_obar_speed': - stat_plot_name = 'Average Wind Vector Speed' - elif stat == 'fbar_obar_dir': - stat_plot_name = 'Average Wind Vector Direction' - elif stat == 'fbar_speed': - stat_plot_name = 'Average Forecast Wind Vector Speed' - elif stat == 'fbar_dir': - stat_plot_name = 'Average Forecast Wind Vector Direction' - elif stat == 'orate': - stat_plot_name = 'Observation Rate' - elif stat == 'baser': - stat_plot_name = 'Base Rate' - elif stat == 'frate': - stat_plot_name = 'Forecast Rate' - elif stat == 'orate_frate': - stat_plot_name = 'Observation and Forecast 
Rates' - elif stat == 'baser_frate': - stat_plot_name = 'Base and Forecast Rates' - elif stat == 'accuracy': - stat_plot_name = 'Accuracy' - elif stat == 'fbias': - stat_plot_name = 'Frequency Bias' - elif stat == 'pod': - stat_plot_name = 'Probability of Detection' - elif stat == 'hrate': - stat_plot_name = 'Hit Rate' - elif stat == 'pofd': - stat_plot_name = 'Probability of False Detection' - elif stat == 'farate': - stat_plot_name = 'False Alarm Rate' - elif stat == 'podn': - stat_plot_name = 'Probability of Detection of the Non-Event' - elif stat == 'faratio': - stat_plot_name = 'False Alarm Ratio' - elif stat == 'csi': - stat_plot_name = 'Critical Success Index' - elif stat == 'ts': - stat_plot_name = 'Threat Score' - elif stat == 'gss': - stat_plot_name = 'Gilbert Skill Score' - elif stat == 'ets': - stat_plot_name = 'Equitable Threat Score' - elif stat == 'hk': - stat_plot_name = 'Hanssen-Kuipers Discriminant' - elif stat == 'tss': - stat_plot_name = 'True Skill Score' - elif stat == 'pss': - stat_plot_name = 'Peirce Skill Score' - elif stat == 'hss': - stat_plot_name = 'Heidke Skill Score' - else: - logger.error(stat+" is not a valid option") - exit(1) - return stat_plot_name - -def calculate_stat(logger, model_data, stat): - """! 
Calculate the statistic from the data from the - read in MET .stat file(s) - - Args: - model_data - Dataframe containing the model(s) - information from the MET .stat - files - stat - string of the simple statistic - name being plotted - - Returns: - stat_values - Dataframe of the statistic values - stat_values_array - array of the statistic values - stat_plot_name - string of the formal statistic - name being plotted - """ - model_data_columns = model_data.columns.values.tolist() - if model_data_columns == [ 'TOTAL' ]: - logger.error("Empty model_data dataframe") - exit(1) - stat_values = model_data.loc[:]['TOTAL'] - else: - if all(elem in model_data_columns for elem in - ['FBAR', 'OBAR', 'MAE']): - line_type = 'SL1L2' - fbar = model_data.loc[:]['FBAR'] - obar = model_data.loc[:]['OBAR'] - fobar = model_data.loc[:]['FOBAR'] - ffbar = model_data.loc[:]['FFBAR'] - oobar = model_data.loc[:]['OOBAR'] - elif all(elem in model_data_columns for elem in - ['FABAR', 'OABAR', 'MAE']): - line_type = 'SAL1L2' - fabar = model_data.loc[:]['FABAR'] - oabar = model_data.loc[:]['OABAR'] - foabar = model_data.loc[:]['FOABAR'] - ffabar = model_data.loc[:]['FFABAR'] - ooabar = model_data.loc[:]['OOABAR'] - elif all(elem in model_data_columns for elem in - ['UFBAR', 'VFBAR']): - line_type = 'VL1L2' - ufbar = model_data.loc[:]['UFBAR'] - vfbar = model_data.loc[:]['VFBAR'] - uobar = model_data.loc[:]['UOBAR'] - vobar = model_data.loc[:]['VOBAR'] - uvfobar = model_data.loc[:]['UVFOBAR'] - uvffbar = model_data.loc[:]['UVFFBAR'] - uvoobar = model_data.loc[:]['UVOOBAR'] - elif all(elem in model_data_columns for elem in - ['UFABAR', 'VFABAR']): - line_type = 'VAL1L2' - ufabar = model_data.loc[:]['UFABAR'] - vfabar = model_data.loc[:]['VFABAR'] - uoabar = model_data.loc[:]['UOABAR'] - voabar = model_data.loc[:]['VOABAR'] - uvfoabar = model_data.loc[:]['UVFOABAR'] - uvffabar = model_data.loc[:]['UVFFABAR'] - uvooabar = model_data.loc[:]['UVOOABAR'] - elif all(elem in model_data_columns for 
elem in - ['VDIFF_SPEED', 'VDIFF_DIR']): - line_type = 'VCNT' - fbar = model_data.loc[:]['FBAR'] - obar = model_data.loc[:]['OBAR'] - fs_rms = model_data.loc[:]['FS_RMS'] - os_rms = model_data.loc[:]['OS_RMS'] - msve = model_data.loc[:]['MSVE'] - rmsve = model_data.loc[:]['RMSVE'] - fstdev = model_data.loc[:]['FSTDEV'] - ostdev = model_data.loc[:]['OSTDEV'] - fdir = model_data.loc[:]['FDIR'] - odir = model_data.loc[:]['ODIR'] - fbar_speed = model_data.loc[:]['FBAR_SPEED'] - obar_speed = model_data.loc[:]['OBAR_SPEED'] - vdiff_speed = model_data.loc[:]['VDIFF_SPEED'] - vdiff_dir = model_data.loc[:]['VDIFF_DIR'] - speed_err = model_data.loc[:]['SPEED_ERR'] - dir_err = model_data.loc[:]['DIR_ERR'] - elif all(elem in model_data_columns for elem in - ['FY_OY', 'FN_ON']): - line_type = 'CTC' - total = model_data.loc[:]['TOTAL'] - fy_oy = model_data.loc[:]['FY_OY'] - fy_on = model_data.loc[:]['FY_ON'] - fn_oy = model_data.loc[:]['FN_OY'] - fn_on = model_data.loc[:]['FN_ON'] - else: - logger.error("Could not recognize line type from columns") - exit(1) - if stat == 'bias': - stat_plot_name = 'Bias' - if line_type == 'SL1L2': - stat_values = fbar - obar - elif line_type == 'VL1L2': - stat_values = np.sqrt(uvffbar) - np.sqrt(uvoobar) - elif line_type == 'VCNT': - stat_values = fbar - obar - elif line_type == 'CTC': - stat_values = (fy_oy + fy_on)/(fy_oy + fn_oy) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'rmse': - stat_plot_name = 'Root Mean Square Error' - if line_type == 'SL1L2': - stat_values = np.sqrt(ffbar + oobar - 2*fobar) - elif line_type == 'VL1L2': - stat_values = np.sqrt(uvffbar + uvoobar - 2*uvfobar) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'msess': - stat_plot_name = "Murphy's Mean Square Error Skill Score" - if line_type == 'SL1L2': - mse = ffbar + oobar - 2*fobar - var_o = oobar - obar*obar - stat_values = 1 - mse/var_o - elif line_type == 
'VL1L2': - mse = uvffbar + uvoobar - 2*uvfobar - var_o = uvoobar - uobar*uobar - vobar*vobar - stat_values = 1 - mse/var_o - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'rsd': - stat_plot_name = 'Ratio of Standard Deviation' - if line_type == 'SL1L2': - var_f = ffbar - fbar*fbar - var_o = oobar - obar*obar - stat_values = np.sqrt(var_f)/np.sqrt(var_o) - elif line_type == 'VL1L2': - var_f = uvffbar - ufbar*ufbar - vfbar*vfbar - var_o = uvoobar - uobar*uobar - vobar*vobar - stat_values = np.sqrt(var_f)/np.sqrt(var_o) - elif line_type == 'VCNT': - stat_values = fstdev/ostdev - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'rmse_md': - stat_plot_name = 'Root Mean Square Error from Mean Error' - if line_type == 'SL1L2': - stat_values = np.sqrt((fbar-obar)**2) - elif line_type == 'VL1L2': - stat_values = np.sqrt((ufbar - uobar)**2 + (vfbar - vobar)**2) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'rmse_pv': - stat_plot_name = 'Root Mean Square Error from Pattern Variation' - if line_type == 'SL1L2': - var_f = ffbar - fbar**2 - var_o = oobar - obar**2 - R = (fobar - (fbar*obar))/(np.sqrt(var_f*var_o)) - stat_values = np.sqrt(var_f + var_o - 2*np.sqrt(var_f*var_o)*R) - elif line_type == 'VL1L2': - var_f = uvffbar - ufbar*ufbar - vfbar*vfbar - var_o = uvoobar - uobar*uobar - vobar*vobar - R = (uvfobar - ufbar*uobar - vfbar*vobar)/(np.sqrt(var_f*var_o)) - stat_values = np.sqrt(var_f + var_o - 2*np.sqrt(var_f*var_o)*R) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'pcor': - stat_plot_name = 'Pattern Correlation' - if line_type == 'SL1L2': - var_f = ffbar - fbar*fbar - var_o = oobar - obar*obar - stat_values = (fobar - fbar*obar)/(np.sqrt(var_f*var_o)) - elif line_type == 'VL1L2': - var_f = uvffbar - ufbar*ufbar - vfbar*vfbar - var_o = uvoobar - uobar*uobar 
- vobar*vobar - stat_values = (uvfobar - ufbar*uobar - vfbar*vobar)/(np.sqrt( - var_f*var_o)) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'acc': - stat_plot_name = 'Anomaly Correlation Coefficient' - if line_type == 'SAL1L2': - stat_values = \ - (foabar - fabar*oabar)/(np.sqrt( - (ffabar - fabar*fabar)*(ooabar - oabar*oabar))) - elif line_type == 'VAL1L2': - stat_values = (uvfoabar)/(np.sqrt(uvffabar*uvooabar)) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar': - stat_plot_name = 'Forecast Averages' - if line_type == 'SL1L2': - stat_values = fbar - elif line_type == 'VL1L2': - stat_values = np.sqrt(uvffbar) - elif line_type == 'VCNT': - stat_values = fbar - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar_obar': - stat_plot_name = 'Forecast and Observation Averages' - if line_type == 'SL1L2': - stat_values = model_data.loc[:][['FBAR', 'OBAR']] - stat_values_fbar = model_data.loc[:]['FBAR'] - stat_values_obar = model_data.loc[:]['OBAR'] - elif line_type == 'VL1L2': - stat_values = model_data.loc[:][['UVFFBAR', 'UVOOBAR']] - stat_values_fbar = np.sqrt(model_data.loc[:]['UVFFBAR']) - stat_values_obar = np.sqrt(model_data.loc[:]['UVOOBAR']) - elif line_type == 'VCNT': - stat_values = model_data.loc[:][['FBAR', 'OBAR']] - stat_values_fbar = model_data.loc[:]['FBAR'] - stat_values_obar = model_data.loc[:]['OBAR'] - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'speed_err': - stat_plot_name = ( - 'Difference in Average FCST and OBS Wind Vector Speeds' - ) - if line_type == 'VCNT': - stat_values = speed_err - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'dir_err': - stat_plot_name = ( - 'Difference in Average FCST and OBS Wind Vector Direction' - ) - if line_type == 'VCNT': - stat_values = 
dir_err - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'rmsve': - stat_plot_name = 'Root Mean Square Difference Vector Error' - if line_type == 'VCNT': - stat_values = rmsve - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'vdiff_speed': - stat_plot_name = 'Difference Vector Speed' - if line_type == 'VCNT': - stat_values = vdiff_speed - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'vdiff_dir': - stat_plot_name = 'Difference Vector Direction' - if line_type == 'VCNT': - stat_values = vdiff_dir - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar_obar_speed': - stat_plot_name = 'Average Wind Vector Speed' - if line_type == 'VCNT': - stat_values = model_data.loc[:][('FBAR_SPEED', 'OBAR_SPEED')] - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar_obar_dir': - stat_plot_name = 'Average Wind Vector Direction' - if line_type == 'VCNT': - stat_values = model_data.loc[:][('FDIR', 'ODIR')] - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar_speed': - stat_plot_name = 'Average Forecast Wind Vector Speed' - if line_type == 'VCNT': - stat_values = fbar_speed - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbar_dir': - stat_plot_name = 'Average Forecast Wind Vector Direction' - if line_type == 'VCNT': - stat_values = fdir - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'orate' or stat == 'baser': - if stat == 'orate': - stat_plot_name = 'Observation Rate' - elif stat == 'baser': - stat_plot_name = 'Base Rate' - if line_type == 'CTC': - stat_values = (fy_oy + fn_oy)/total - else: - logger.error(stat+" cannot be computed from line type "+line_type) - 
exit(1) - elif stat == 'frate': - stat_plot_name = 'Forecast Rate' - if line_type == 'CTC': - stat_values = (fy_oy + fy_on)/total - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'orate_frate' or stat == 'baser_frate': - if stat == 'orate_frate': - stat_plot_name = 'Observation and Forecast Rates' - elif stat == 'baser_frate': - stat_plot_name = 'Base and Forecast Rates' - if line_type == 'CTC': - stat_values_fbar = (fy_oy + fy_on)/total - stat_values_obar = (fy_oy + fn_oy)/total - stat_values = pd.concat([stat_values_fbar, stat_values_obar], - axis=1) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'accuracy': - stat_plot_name = 'Accuracy' - if line_type == 'CTC': - stat_values = (fy_oy + fn_on)/total - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'fbias': - stat_plot_name = 'Frequency Bias' - if line_type == 'CTC': - stat_values = (fy_oy + fy_on)/(fy_oy + fn_oy) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'pod' or stat == 'hrate': - if stat == 'pod': - stat_plot_name = 'Probability of Detection' - elif stat == 'hrate': - stat_plot_name = 'Hit Rate' - if line_type == 'CTC': - stat_values = fy_oy/(fy_oy + fn_oy) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'pofd' or stat == 'farate': - if stat == 'pofd': - stat_plot_name = 'Probability of False Detection' - elif stat == 'farate': - stat_plot_name = 'False Alarm Rate' - if line_type == 'CTC': - stat_values = fy_on/(fy_on + fn_on) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'podn': - stat_plot_name = 'Probability of Detection of the Non-Event' - if line_type == 'CTC': - stat_values = fn_on/(fy_on + fn_on) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - 
exit(1) - elif stat == 'faratio': - stat_plot_name = 'False Alarm Ratio' - if line_type == 'CTC': - stat_values = fy_on/(fy_on + fy_oy) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'csi' or stat == 'ts': - if stat == 'csi': - stat_plot_name = 'Critical Success Index' - elif stat == 'ts': - stat_plot_name = 'Threat Score' - if line_type == 'CTC': - stat_values = fy_oy/(fy_oy + fy_on + fn_oy) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'gss' or stat == 'ets': - if stat == 'gss': - stat_plot_name = 'Gilbert Skill Score' - elif stat == 'ets': - stat_plot_name = 'Equitable Threat Score' - if line_type == 'CTC': - C = ((fy_oy + fy_on)*(fy_oy + fn_oy))/total - stat_values = (fy_oy - C)/(fy_oy + fy_on+ fn_oy - C) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'hk' or stat == 'tss' or stat == 'pss': - if stat == 'hk': - stat_plot_name = 'Hanssen-Kuipers Discriminant' - elif stat == 'tss': - stat_plot_name = 'True Skill Score' - elif stat == 'pss': - stat_plot_name = 'Peirce Skill Score' - if line_type == 'CTC': - stat_values = ( - ((fy_oy*fn_on)-(fy_on*fn_oy))/((fy_oy+fn_oy)*(fy_on+fn_on)) - ) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - elif stat == 'hss': - stat_plot_name = 'Heidke Skill Score' - if line_type == 'CTC': - Ca = (fy_oy+fy_on)*(fy_oy+fn_oy) - Cb = (fn_oy+fn_on)*(fy_on+fn_on) - C = (Ca + Cb)/total - stat_values = (fy_oy + fn_on - C)/(total - C) - else: - logger.error(stat+" cannot be computed from line type "+line_type) - exit(1) - else: - logger.error(stat+" is not a valid option") - exit(1) - nindex = stat_values.index.nlevels - if stat == 'fbar_obar' or stat == 'orate_frate' or stat == 'baser_frate': - if nindex == 1: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - 
stat_values_fbar.values.reshape(index0) - ) - ) - index0 = len(stat_values_obar.index.get_level_values(0).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0) - ) - ) - elif nindex == 2: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - index1 = len(stat_values_fbar.index.get_level_values(1).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - stat_values_fbar.values.reshape(index0,index1) - ) - ) - index0 = len(stat_values_obar.index.get_level_values(0).unique()) - index1 = len(stat_values_obar.index.get_level_values(1).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0,index1) - ) - ) - elif nindex == 3: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - index1 = len(stat_values_fbar.index.get_level_values(1).unique()) - index2 = len(stat_values_fbar.index.get_level_values(2).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - stat_values_fbar.values.reshape(index0,index1,index2) - ) - ) - index0 = len(stat_values_obar.index.get_level_values(0).unique()) - index1 = len(stat_values_obar.index.get_level_values(1).unique()) - index2 = len(stat_values_obar.index.get_level_values(2).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0,index1,index2) - ) - ) - stat_values_array = np.ma.array([stat_values_array_fbar, - stat_values_array_obar]) - else: - if nindex == 1: - index0 = len(stat_values.index.get_level_values(0).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1,index0) - ) - ) - elif nindex == 2: - index0 = len(stat_values.index.get_level_values(0).unique()) - index1 = len(stat_values.index.get_level_values(1).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1,index0,index1) - ) - ) - elif nindex == 3: - index0 = len(stat_values.index.get_level_values(0).unique()) - 
index1 = len(stat_values.index.get_level_values(1).unique()) - index2 = len(stat_values.index.get_level_values(2).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1,index0,index1,index2) - ) - ) - return stat_values, stat_values_array, stat_plot_name - -def get_lead_avg_file(stat, input_filename, fcst_lead, output_base_dir): - lead_avg_filename = stat + '_' + os.path.basename(input_filename) - - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename - if f'fcst_lead{fcst_lead}' in lead_avg_filename: - lead_avg_filename = lead_avg_filename.replace(f'fcst_lead{fcst_lead}', 'fcst_lead_avgs') - lead_avg_filename += '.txt' - - # if not, remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename - elif 'fcst_lead_avgs' not in input_filename: - lead_avg_filename = lead_avg_filename.replace(f'fcst_lead{fcst_lead}', '') - lead_avg_filename += '_fcst_lead_avgs.txt' - - lead_avg_file = os.path.join(output_base_dir, 'data', - lead_avg_filename) - return lead_avg_file - -def get_ci_file(stat, input_filename, fcst_lead, output_base_dir, ci_method): - CI_filename = stat + '_' + os.path.basename(input_filename) - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename - if f'fcst_lead{fcst_lead}' in CI_filename: - CI_filename = CI_filename.replace(f'fcst_lead{fcst_lead}', - 'fcst_lead_avgs') - - # if not and fcst_lead_avgs isn't already in filename, - # remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename - elif 'fcst_lead_avgs' not in CI_filename: - CI_filename = CI_filename.replace(f'fcst_lead{fcst_lead}', - '') - CI_filename += '_fcst_lead_avgs' - - CI_filename += '_CI_' + ci_method + '.txt' - - CI_file = os.path.join(output_base_dir, 'data', - CI_filename) - return CI_file From 8fb7a6f83d4f94c727c9603fbd4f8ffe40a62d4f Mon Sep 17 00:00:00 2001 From: George McCabe 
<23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:13:01 -0600 Subject: [PATCH 15/92] turn on StatAnalysis use cases to test after removing MakePlots ci-run-diff --- .github/parm/use_case_groups.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/parm/use_case_groups.json b/.github/parm/use_case_groups.json index 3bb687f2c8..d162a1a83e 100644 --- a/.github/parm/use_case_groups.json +++ b/.github/parm/use_case_groups.json @@ -7,7 +7,7 @@ { "category": "met_tool_wrapper", "index_list": "30-58", - "run": false + "run": true }, { "category": "air_quality_and_comp", @@ -52,7 +52,7 @@ { "category": "data_assimilation", "index_list": "0", - "run": false + "run": true }, { "category": "marine_and_cryosphere", @@ -82,7 +82,7 @@ { "category": "medium_range", "index_list": "3-5", - "run": false + "run": true }, { "category": "medium_range", @@ -117,7 +117,7 @@ { "category": "precipitation", "index_list": "3-7", - "run": true + "run": false }, { "category": "precipitation", @@ -162,7 +162,7 @@ { "category": "s2s_mid_lat", "index_list": "0-2", - "run": false + "run": true }, { "category": "s2s_mid_lat", From c039349172eaf8dd770107f6cd43a52de4596043 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:24:48 -0600 Subject: [PATCH 16/92] per #1843, removed plot_util tests because it has been removed --- .../plotting/plot_util/test_plot_util.py | 804 ------------------ 1 file changed, 804 deletions(-) delete mode 100644 internal/tests/pytests/plotting/plot_util/test_plot_util.py diff --git a/internal/tests/pytests/plotting/plot_util/test_plot_util.py b/internal/tests/pytests/plotting/plot_util/test_plot_util.py deleted file mode 100644 index 8fb49fad8c..0000000000 --- a/internal/tests/pytests/plotting/plot_util/test_plot_util.py +++ /dev/null @@ -1,804 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -import os -import sys -import datetime -import 
logging - -import numpy as np -import pandas as pd - - -METPLUS_BASE = os.getcwd().split('/internal')[0] -sys.path.append(METPLUS_BASE+'/ush/plotting_scripts') -import plot_util -logger = logging.getLogger('~/metplus_pytest_plot_util.log') - - -@pytest.mark.plotting -def test_get_date_arrays(): - # Independently test the creation of - # the date arrays, one used for plotting - # the other the expected dates in the - # MET .stat file format - # Test 1 - date_type = 'VALID' - date_beg = '20190101' - date_end = '20190105' - fcst_valid_hour = '000000' - fcst_init_hour = '000000' - obs_valid_hour = '' - obs_init_hour = '' - lead = '240000' - date_base = datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(days=i) for i in range(5)] - ) - expected_plot_time_dates = [] - expected_expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second - expected_plot_time_dates.append(date.toordinal() + seconds/86400.) 
- expected_expected_stat_file_dates.append( - date.strftime('%Y%m%d_%H%M%S') - ) - test_plot_time_dates, test_expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, lead) - ) - assert(len(test_plot_time_dates) == - len(expected_plot_time_dates)) - for l in range(len(test_plot_time_dates)): - assert(test_plot_time_dates[l] == - expected_plot_time_dates[l]) - assert(len(test_expected_stat_file_dates) == - len(expected_expected_stat_file_dates)) - for l in range(len(test_expected_stat_file_dates)): - assert(test_expected_stat_file_dates[l] == - expected_expected_stat_file_dates[l]) - # Test 2 - date_type = 'VALID' - date_beg = '20190101' - date_end = '20190105' - fcst_valid_hour = '000000, 060000, 120000, 180000' - fcst_init_hour = '000000, 060000, 120000, 180000' - obs_valid_hour = '' - obs_init_hour = '' - lead = '480000' - date_base = datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(hours=i) for i in range(0,120,6)] - ) - expected_plot_time_dates = [] - expected_expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second - expected_plot_time_dates.append(date.toordinal() + seconds/86400.) 
- expected_expected_stat_file_dates.append( - date.strftime('%Y%m%d_%H%M%S') - ) - test_plot_time_dates, test_expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, lead) - ) - assert(len(test_plot_time_dates) == - len(expected_plot_time_dates)) - for l in range(len(test_plot_time_dates)): - assert(test_plot_time_dates[l] == - expected_plot_time_dates[l]) - assert(len(test_expected_stat_file_dates) == - len(expected_expected_stat_file_dates)) - for l in range(len(test_expected_stat_file_dates)): - assert(test_expected_stat_file_dates[l] == - expected_expected_stat_file_dates[l]) - # Test 3 - date_type = 'INIT' - date_beg = '20190101' - date_end = '20190105' - fcst_valid_hour = '000000' - fcst_init_hour = '000000' - obs_valid_hour = '' - obs_init_hour = '' - lead = '360000' - date_base = datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(days=i) for i in range(5)] - ) - lead_hour_seconds = int(int(lead[:-4])) * 3600 - lead_min_seconds = int(lead[-4:-2]) * 60 - lead_seconds = int(lead[-2:]) - lead_offset = datetime.timedelta( - seconds=lead_hour_seconds + lead_min_seconds + lead_seconds - ) - expected_plot_time_dates = [] - expected_expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second - expected_plot_time_dates.append(date.toordinal() + seconds/86400.) 
- expected_expected_stat_file_dates.append( - (date + lead_offset).strftime('%Y%m%d_%H%M%S') - ) - test_plot_time_dates, test_expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, lead) - ) - assert(len(test_plot_time_dates) == - len(expected_plot_time_dates)) - for l in range(len(test_plot_time_dates)): - assert(test_plot_time_dates[l] == - expected_plot_time_dates[l]) - assert(len(test_expected_stat_file_dates) == - len(expected_expected_stat_file_dates)) - for l in range(len(test_expected_stat_file_dates)): - assert(test_expected_stat_file_dates[l] == - expected_expected_stat_file_dates[l]) - # Test 4 - date_type = 'INIT' - date_beg = '20190101' - date_end = '20190105' - fcst_valid_hour = '000000, 060000, 120000, 180000' - fcst_init_hour = '000000, 060000, 120000, 180000' - obs_valid_hour = '' - obs_init_hour = '' - lead = '120000' - date_base = datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(hours=i) for i in range(0,120,6)] - ) - lead_hour_seconds = int(int(lead[:-4])) * 3600 - lead_min_seconds = int(lead[-4:-2]) * 60 - lead_seconds = int(lead[-2:]) - lead_offset = datetime.timedelta( - seconds=lead_hour_seconds + lead_min_seconds + lead_seconds - ) - expected_plot_time_dates = [] - expected_expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second - expected_plot_time_dates.append(date.toordinal() + seconds/86400.) 
- expected_expected_stat_file_dates.append( - (date + lead_offset).strftime('%Y%m%d_%H%M%S') - ) - test_plot_time_dates, test_expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, lead) - ) - assert(len(test_plot_time_dates) == - len(expected_plot_time_dates)) - for l in range(len(test_plot_time_dates)): - assert(test_plot_time_dates[l] == - expected_plot_time_dates[l]) - assert(len(test_expected_stat_file_dates) == - len(expected_expected_stat_file_dates)) - for l in range(len(test_expected_stat_file_dates)): - assert(test_expected_stat_file_dates[l] == - expected_expected_stat_file_dates[l]) - - -@pytest.mark.plotting -def test_format_thresh(): - # Independently test the formatting - # of thresholds - # Test 1 - thresh = '>=5' - expected_thresh_symbol = '>=5' - expected_thresh_letter = 'ge5' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 2 - thresh = 'ge5' - expected_thresh_symbol = '>=5' - expected_thresh_letter = 'ge5' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 3 - thresh = '>15' - expected_thresh_symbol = '>15' - expected_thresh_letter = 'gt15' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 4 - thresh = 'gt15' - expected_thresh_symbol = '>15' - expected_thresh_letter = 'gt15' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 5 - thresh = '==1' - expected_thresh_symbol = '==1' - 
expected_thresh_letter = 'eq1' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 6 - thresh = 'eq1' - expected_thresh_symbol = '==1' - expected_thresh_letter = 'eq1' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 7 - thresh = '!=0.5' - expected_thresh_symbol = '!=0.5' - expected_thresh_letter = 'ne0.5' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 8 - thresh = 'ne0.5' - expected_thresh_symbol = '!=0.5' - expected_thresh_letter = 'ne0.5' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 9 - thresh = '<=1000' - expected_thresh_symbol = '<=1000' - expected_thresh_letter = 'le1000' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 10 - thresh = 'le1000' - expected_thresh_symbol = '<=1000' - expected_thresh_letter = 'le1000' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 11 - thresh = '<0.001' - expected_thresh_symbol = '<0.001' - expected_thresh_letter = 'lt0.001' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - # Test 12 - thresh = 'lt0.001' - expected_thresh_symbol = 
'<0.001' - expected_thresh_letter = 'lt0.001' - test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh) - assert(test_thresh_symbol == expected_thresh_symbol) - assert(test_thresh_letter == expected_thresh_letter) - - -@pytest.mark.plotting -def test_get_stat_file_base_columns(): - # Independently test getting list - # of the base MET version .stat file columns - # Test 1 - met_version = '8.0' - expected_stat_file_base_columns = [ 'VERSION', 'MODEL', 'DESC', - 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', - 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_LEV', 'OBS_VAR', - 'OBS_LEV', 'OBTYPE', 'VX_MASK', - 'INTERP_MTHD', 'INTERP_PNTS', - 'FCST_THRESH', 'OBS_THRESH', - 'COV_THRESH', 'ALPHA', 'LINE_TYPE' ] - test_stat_file_base_columns = plot_util.get_stat_file_base_columns( - met_version - ) - assert(test_stat_file_base_columns == expected_stat_file_base_columns) - # Test 2 - met_version = '8.1' - expected_stat_file_base_columns = [ 'VERSION', 'MODEL', 'DESC', - 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', - 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_UNITS', 'FCST_LEV', - 'OBS_VAR', 'OBS_UNITS', 'OBS_LEV', - 'OBTYPE', 'VX_MASK', 'INTERP_MTHD', - 'INTERP_PNTS', 'FCST_THRESH', - 'OBS_THRESH', 'COV_THRESH', 'ALPHA', - 'LINE_TYPE' ] - test_stat_file_base_columns = plot_util.get_stat_file_base_columns( - met_version - ) - assert(test_stat_file_base_columns == expected_stat_file_base_columns) - - -@pytest.mark.plotting -def test_get_stat_file_line_type_columns(): - # Independently test getting list - # of the line type MET version .stat file columns - # Test 1 - met_version = '8.1' - line_type = 'SL1L2' - expected_stat_file_line_type_columns = [ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', - 'FFBAR', 'OOBAR', 'MAE' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - 
expected_stat_file_line_type_columns) - # Test 2 - met_version = '8.1' - line_type = 'SAL1L2' - expected_stat_file_line_type_columns = [ 'TOTAL', 'FABAR', 'OABAR', - 'FOABAR', 'FFABAR', 'OOABAR', - 'MAE' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - expected_stat_file_line_type_columns) - # Test 3 - met_version = '6.1' - line_type = 'VL1L2' - expected_stat_file_line_type_columns = [ 'TOTAL', 'UFBAR', 'VFBAR', - 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - expected_stat_file_line_type_columns) - # Test 4 - met_version = '8.1' - line_type = 'VL1L2' - expected_stat_file_line_type_columns = [ 'TOTAL', 'UFBAR', 'VFBAR', - 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR', - 'F_SPEED_BAR', 'O_SPEED_BAR' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - expected_stat_file_line_type_columns) - # Test 5 - met_version = '8.1' - line_type = 'VAL1L2' - expected_stat_file_line_type_columns = [ 'TOTAL', 'UFABAR', 'VFABAR', - 'UOABAR', 'VOABAR', 'UVFOABAR', - 'UVFFABAR', 'UVOOABAR' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - # Test 6 - met_version = '8.1' - line_type = 'VCNT' - expected_stat_file_line_type_columns = [ 'TOTAL', 'FBAR', 'FBAR_NCL', - 'FBAR_NCU', 'OBAR', 'OBAR_NCL', - 'OBAR_NCU', 'FS_RMS', - 'FS_RMS_NCL', 'FS_RMS_NCU', - 'OS_RMS', 'OS_RMS_NCL', - 'OS_RMS_NCU', 'MSVE', 'MSVE_NCL', - 'MSVE_NCU', 'RMSVE', 'RMSVE_NCL', - 'RMSVE_NCU', 'FSTDEV', - 'FSTDEV_NCL', 'FSTDEV_NCU', - 'OSTDEV', 'OSTDEV_NCL', - 'OSTDEV_NCU', 'FDIR', 'FDIR_NCL', - 'FDIR_NCU', 'ODIR', 'ODIR_NCL', - 
'ODIR_NCU', 'FBAR_SPEED', - 'FBAR_SPEED_NCL', - 'FBAR_SPEED_NCU', 'OBAR_SPEED', - 'OBAR_SPEED_NCL', - 'OBAR_SPEED_NCU', 'VDIFF_SPEED', - 'VDIFF_SPEED_NCL', - 'VDIFF_SPEED_NCU', 'VDIFF_DIR', - 'VDIFF_DIR_NCL', 'VDIFF_DIR_NCU', - 'SPEED_ERR', 'SPEED_ERR_NCL', - 'SPEED_ERR_NCU', 'SPEED_ABSERR', - 'SPEED_ABSERR_NCL', - 'SPEED_ABSERR_NCU', 'DIR_ERR', - 'DIR_ERR_NCL', 'DIR_ERR_NCU', - 'DIR_ABSERR', 'DIR_ABSERR_NCL', - 'DIR_ABSERR_NCU' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - expected_stat_file_line_type_columns) - # Test 7 - met_version = '8.1' - line_type = 'CTC' - expected_stat_file_line_type_columns = [ 'TOTAL', 'FY_OY', 'FY_ON', - 'FN_OY', 'FN_ON' ] - test_stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, met_version, - line_type) - ) - assert(test_stat_file_line_type_columns == - expected_stat_file_line_type_columns) - - -@pytest.mark.plotting -def get_clevels(): - # Independently test creating an array - # of levels centered about 0 to plot - # Test 1 - data = np.array([ 7.89643761, 2.98214969, 4.04690632, 1.1047872, - -3.42288272, 1.0111309, 8.02330262, -8.03515159, - -8.89454837, 2.45191295, 9.43015692, -0.53815455, - 4.34984478, 4.54528989, -1.35164646 ]) - expected_clevels = np.array([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10]) - test_clevels = plot_util.get_clevels(data) - assert(test_clevels == expected_clevels) - - -@pytest.mark.plotting -def test_calculate_average(): - # Independently test getting the average - # of a data array based on method - date_base = datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(days=i) for i in range(5)] - ) - expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - expected_stat_file_dates.append( - date.strftime('%Y%m%d_%H%M%S') - ) - model_data_index = pd.MultiIndex.from_product( - [['MODEL_TEST'], 
expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) - model_data_array = np.array([ - [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722, - 30614724.90979, 5.06746], - [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868, - 30549654.24048, 5.12344], - [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927, - 30522660.30975, 5.61752], - [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965, - 30530479.99675, 4.94325], - [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046, - 30496805.48259, 5.20369] - ]) - model_data = pd.DataFrame(model_data_array, index=model_data_index, - columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', - 'FFBAR', 'OOBAR', 'MAE' ]) - stat_values_array = np.array([[[(5525.75062 - 5525.66493), - (5519.11108 - 5519.1014), - (5516.80228 - 5516.79513), - (5516.93924 - 5517.80544), - (5514.52274 - 5514.68224) - ]]]) - # Test 1 - average_method = 'MEAN' - stat = 'bias' - model_dataframe = model_data - model_stat_values = stat_values_array[:,0,:] - expected_average_array = np.array([-0.184636]) - test_average_array = plot_util.calculate_average(logger, average_method, - stat, model_dataframe, - model_stat_values) - assert(len(test_average_array) == len(expected_average_array)) - for l in range(len(test_average_array)): - assert(round(test_average_array[l],6) == expected_average_array[l]) - # Test 2 - average_method = 'MEDIAN' - stat = 'bias' - model_dataframe = model_data - model_stat_values = stat_values_array[:,0,:] - expected_average_array = np.array([0.00715]) - test_average_array = plot_util.calculate_average(logger, average_method, - stat, model_dataframe, - model_stat_values) - assert(len(test_average_array) == len(expected_average_array)) - for l in range(len(test_average_array)): - assert(round(test_average_array[l],6) == expected_average_array[l]) - # Test 3 - average_method = 'AGGREGATION' - stat = 'bias' - model_dataframe = model_data - model_stat_values = stat_values_array[:,0,:] - 
expected_average_array = np.array([-0.184636]) - test_average_array = plot_util.calculate_average(logger, average_method, - stat, model_dataframe, - model_stat_values) - assert(len(test_average_array) == len(expected_average_array)) - for l in range(len(test_average_array)): - assert(round(test_average_array[l],6) == expected_average_array[l]) - # Test 4 - stat_values_array = np.array([[[5525.75062, 5519.11108, - 5516.80228, 5516.93924, - 5514.52274]], - [[5525.66493, 5519.1014, - 5516.79513, 5517.80544, - 5514.68224 - ]]]) - average_method = 'MEAN' - stat = 'fbar_obar' - model_dataframe = model_data - model_stat_values = stat_values_array[:,0,:] - expected_average_array = np.array([5518.625192,5518.809828]) - test_average_array = plot_util.calculate_average(logger, average_method, - stat, model_dataframe, - model_stat_values) - assert(len(test_average_array) == len(expected_average_array)) - for l in range(len(test_average_array)): - assert(round(test_average_array[l],6) == expected_average_array[l]) - # Test 5 - average_method = 'MEDIAN' - stat = 'fbar_obar' - model_dataframe = model_data - model_stat_values = stat_values_array[:,0,:] - expected_average_array = np.array([5516.93924, 5517.80544]) - test_average_array = plot_util.calculate_average(logger, average_method, - stat, model_dataframe, - model_stat_values) - assert(len(test_average_array) == len(expected_average_array)) - for l in range(len(test_average_array)): - assert(round(test_average_array[l],6) == expected_average_array[l]) - - -@pytest.mark.long -def test_calculate_ci(): - pytest.skip("Takes far too long to run") - # Independently test getting the - # confidence interval between two data arrays - # based on method - randx_seed = np.random.seed(0) - # Test 1 - ci_method = 'EMC' - modelB_values = np.array([0.4983181, 0.63076339, 0.73753565, - 0.97960614, 0.74599612, 0.18829818, - 0.29490815, 0.5063043, 0.15074971, - 0.89009979, 0.81246532, 0.45399668, - 0.98247594, 0.38211414, 0.26690678]) - 
modelA_values = np.array([0.37520287, 0.89286092, 0.66785908, - 0.55742834, 0.60978346, 0.5760979, - 0.55055558, 0.00388764, 0.55821689, - 0.56042747, 0.30637593, 0.83325185, - 0.84098604, 0.04021844, 0.57214717]) - total_days = 15 - stat = 'bias' - average_method = 'MEAN' - randx = np.random.rand(10000, total_days) - expected_std = np.sqrt( - (( - (modelB_values - modelA_values) - - (modelB_values - modelA_values).mean() - )**2).mean() - ) - expected_intvl = 2.228*expected_std/np.sqrt(total_days-1) - test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values, - modelA_values, total_days, - stat, average_method, randx) - assert(test_intvl == expected_intvl) - # Test 2 - ci_method = 'EMC' - modelB_values = np.array([0.4983181, 0.63076339, 0.73753565, - 0.97960614, 0.74599612, 0.18829818, - 0.29490815, 0.5063043, 0.15074971, - 0.89009979, 0.81246532, 0.45399668, - 0.98247594, 0.38211414, 0.26690678, - 0.64162609, 0.01370935, 0.79477382, - 0.31573415, 0.35282921, 0.57511574, - 0.27815519, 0.49562973, 0.4859588, - 0.16461642, 0.75849444, 0.44332183, - 0.94935173, 0.62597888, 0.12819335]) - modelA_values = np.array([0.37520287, 0.89286092, 0.66785908, - 0.55742834, 0.60978346, 0.5760979, - 0.55055558, 0.00388764, 0.55821689, - 0.56042747, 0.30637593, 0.83325185, - 0.84098604, 0.04021844, 0.57214717, - 0.75091023, 0.47321941, 0.12862311, - 0.8644722, 0.92040807, 0.61376225, - 0.24347848, 0.69990467, 0.69711331, - 0.91866337, 0.63945963, 0.59999792, - 0.2920741, 0.64972479, 0.25025121]) - total_days = 30 - stat = 'bias' - average_method = 'MEAN' - randx = np.random.rand(10000, total_days) - expected_std = np.sqrt( - (( - (modelB_values - modelA_values) - - (modelB_values - modelA_values).mean() - )**2).mean() - ) - expected_intvl = 2.042*expected_std/np.sqrt(total_days-1) - test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values, - modelA_values, total_days, - stat, average_method, randx) - assert(test_intvl == expected_intvl) - # Test 3 - date_base 
= datetime.datetime(2019, 1, 1) - date_array = np.array( - [date_base + datetime.timedelta(days=i) for i in range(5)] - ) - expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - expected_stat_file_dates.append( - date.strftime('%Y%m%d_%H%M%S') - ) - model_data_indexA = pd.MultiIndex.from_product( - [['MODEL_TESTA'], expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) - model_data_arrayA = np.array([ - [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722, - 30614724.90979, 5.06746], - [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868, - 30549654.24048, 5.12344], - [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927, - 30522660.30975, 5.61752], - [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965, - 30530479.99675, 4.94325], - [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046, - 30496805.48259, 5.20369] - ]) - model_dataA = pd.DataFrame(model_data_arrayA, index=model_data_indexA, - columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', - 'FFBAR', 'OOBAR', 'MAE' ]) - model_data_arrayB = np.array([ - [3600, 5527.43726, 5527.79714, 30635385.37277, 30633128.08035, - 30637667.9488, 3.74623], - [3600, 5520.22487, 5520.5867, 30562940.31742, 30560471.32084, - 30565442.31244, 4.17792], - [3600, 5518.16049, 5518.53379, 30538694.69234, 30536683.66886, - 30540732.11308, 3.86693], - [3600, 5519.20033, 5519.38443, 30545925.19732, 30544766.74602, - 30547108.75357, 3.7534], - [3600, 5515.78776, 5516.17552, 30509811.84136, 30507573.43899, - 30512077.12263, 4.02554] - ]) - model_data_indexB = pd.MultiIndex.from_product( - [['MODEL_TESTB'], expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) - model_dataB = pd.DataFrame(model_data_arrayB, index=model_data_indexB, - columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', - 'FFBAR', 'OOBAR', 'MAE' ]) - ci_method = 'EMC_MONTE_CARLO' - modelB_values = model_dataB - modelA_values = model_dataA - total_days = 5 - stat = 'bias' - average_method = 
'AGGREGATION' - randx = np.random.rand(10000, total_days) - expected_intvl = 0.3893656076904014 - test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values, - modelA_values, total_days, - stat, average_method, randx) - assert(test_intvl == expected_intvl) - - -@pytest.mark.plotting -def test_get_stat_plot_name(): - # Independently test getting the - # a more formalized statistic name - # Test 1 - stat = 'bias' - expected_stat_plot_name = 'Bias' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 2 - stat = 'rmse_md' - expected_stat_plot_name = 'Root Mean Square Error from Mean Error' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 3 - stat = 'fbar_obar' - expected_stat_plot_name = 'Forecast and Observation Averages' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 4 - stat = 'acc' - expected_stat_plot_name = 'Anomaly Correlation Coefficient' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 5 - stat = 'vdiff_speed' - expected_stat_plot_name = 'Difference Vector Speed' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 6 - stat = 'baser' - expected_stat_plot_name = 'Base Rate' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 7 - stat = 'fbias' - expected_stat_plot_name = 'Frequency Bias' - test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - assert(test_stat_plot_name == expected_stat_plot_name) - - -@pytest.mark.plotting -def test_calculate_stat(): - # Independently test calculating - # statistic values - date_base = datetime.datetime(2019, 1, 1) - 
date_array = np.array( - [date_base + datetime.timedelta(days=i) for i in range(5)] - ) - expected_stat_file_dates = [] - for date in date_array: - dt = date.time() - expected_stat_file_dates.append( - date.strftime('%Y%m%d_%H%M%S') - ) - model_data_index = pd.MultiIndex.from_product( - [['MODEL_TEST'], expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) - model_data_array = np.array([ - [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722, - 30614724.90979, 5.06746], - [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868, - 30549654.24048, 5.12344], - [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927, - 30522660.30975, 5.61752], - [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965, - 30530479.99675, 4.94325], - [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046, - 30496805.48259, 5.20369] - ]) - model_data = pd.DataFrame(model_data_array, index=model_data_index, - columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', - 'FFBAR', 'OOBAR', 'MAE' ]) - # Test 1 - stat = 'bias' - expected_stat_values_array = np.array([[[(5525.75062 - 5525.66493), - (5519.11108 - 5519.1014), - (5516.80228 - 5516.79513), - (5516.93924 - 5517.80544), - (5514.52274 - 5514.68224) - ]]]) - expected_stat_values = pd.Series(expected_stat_values_array[0,0,:], - index=model_data_index) - expected_stat_plot_name = 'Bias' - test_stat_values, test_stat_values_array, test_stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - assert(test_stat_values.equals(expected_stat_values)) - assert(len(test_stat_values_array[0,0,:]) == - len(expected_stat_values_array[0,0,:])) - for l in range(len(test_stat_values_array[0,0,:])): - assert(test_stat_values_array[0,0,l] == - expected_stat_values_array[0,0,l]) - assert(test_stat_plot_name == expected_stat_plot_name) - # Test 2 - stat = 'fbar_obar' - expected_stat_values_array = np.array([[[5525.75062, 5519.11108, - 5516.80228, 5516.93924, - 5514.52274]], - [[5525.66493, 5519.1014, - 
5516.79513, 5517.80544, - 5514.68224 - ]]]) - expected_stat_values = pd.DataFrame(expected_stat_values_array[:,0,:].T, - index=model_data_index, - columns=[ 'FBAR', 'OBAR' ]) - expected_stat_plot_name = 'Forecast and Observation Averages' - test_stat_values, test_stat_values_array, test_stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - assert(test_stat_values.equals(expected_stat_values)) - assert(len(test_stat_values_array[0,0,:]) == - len(expected_stat_values_array[0,0,:])) - for l in range(len(test_stat_values_array[0,0,:])): - assert(test_stat_values_array[0,0,l] == - expected_stat_values_array[0,0,l]) - assert(len(test_stat_values_array[1,0,:]) == - len(expected_stat_values_array[1,0,:])) - for l in range(len(test_stat_values_array[1,0,:])): - assert(test_stat_values_array[1,0,l] == - expected_stat_values_array[1,0,l]) - assert(test_stat_plot_name == expected_stat_plot_name) From 3f73c7d1a0c34edd2ff0bbb309053b4d5adcafd9 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:28:17 -0600 Subject: [PATCH 17/92] removed glossary item that is no longer used --- docs/Users_Guide/wrappers.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 954eafc2fc..4b39c94cc9 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6870,8 +6870,6 @@ file for running with LOOP_ORDER = processes: | :term:`STAT_ANALYSIS_DUMP_ROW_TMPL` | :term:`STAT_ANALYSIS_OUT_STAT_TMPL` | :term:`PLOT_TIME` - | :term:`VERIF_CASE` - | :term:`VERIF_TYPE` | :term:`MODEL_NAME` | :term:`MODEL_OBS_NAME` | :term:`MODEL_NAME_ON_PLOT` From 4a9e3ca1e396a7d9f1ef5e8aca9372f91a6abf8b Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 30 Sep 2022 13:10:33 -0600 Subject: [PATCH 18/92] changed logic to format lists when they are read and added support for setting groups of 
list items that can be looped over, ci-run-diff --- .../stat_analysis/test_stat_analysis.py | 16 +- metplus/wrappers/stat_analysis_wrapper.py | 159 ++++++++++-------- 2 files changed, 95 insertions(+), 80 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 3f39bd876f..0a5ac4f3aa 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -30,10 +30,11 @@ def stat_analysis_wrapper(metplus_config): @pytest.mark.parametrize( 'input, expected_output', [ ('', []), - ('0,1,2,3', ['000000', '010000', '020000', '030000']), - ('01', ['010000']), - #('010000', ['010000']), - ('begin_end_incr(0,3,1)', ['000000', '010000', '020000', '030000']), + ('0,1,2,3', ['"000000"', '"010000"', '"020000"', '"030000"']), + ('01', ['"010000"']), + ('010000', ['"010000"']), + ('begin_end_incr(0,3,1)', ['"000000"', '"010000"', + '"020000"', '"030000"']), ] ) @pytest.mark.wrapper_d @@ -83,12 +84,13 @@ def test_create_c_dict(metplus_config): assert 'FCST_VALID_HOUR_LIST' in c_dict['LOOP_LIST_ITEMS'] assert 'MODEL_LIST' in c_dict['LOOP_LIST_ITEMS'] assert c_dict['VAR_LIST'] == [] - assert c_dict['MODEL_LIST'] == ['MODEL_TEST'] + assert c_dict['MODEL_LIST'] == ['"MODEL_TEST"'] assert c_dict['DESC_LIST'] == [] assert c_dict['FCST_LEAD_LIST'] == [] assert c_dict['OBS_LEAD_LIST'] == [] - assert c_dict['FCST_VALID_HOUR_LIST'] == ['000000'] - assert c_dict['FCST_INIT_HOUR_LIST'] == ['000000', '060000', '120000', '180000'] + assert c_dict['FCST_VALID_HOUR_LIST'] == ['"000000"'] + assert c_dict['FCST_INIT_HOUR_LIST'] == ['"000000"', '"060000"', + '"120000"', '"180000"'] assert c_dict['OBS_VALID_HOUR_LIST'] == [] assert c_dict['OBS_INIT_HOUR_LIST'] == [] assert c_dict['VX_MASK_LIST'] == [] diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 
98a20efa2a..633a7bf368 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -188,34 +188,16 @@ def create_c_dict(self): job_args = self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS') c_dict['JOBS'].append(f'-job {job_name} {job_args}') - # read in all lists except field lists, which will be read in afterwards and checked - all_lists_to_read = self.expected_config_lists + self.list_categories - non_field_lists = [conf_list for - conf_list in all_lists_to_read - if conf_list not in self.field_lists] - for conf_list in non_field_lists: - c_dict[conf_list] = getlist( - self.config.getstr('config', conf_list, '') - ) - - # if list in format lists, zero pad value to be at least 2 - # digits, then add 4 zeros - if conf_list in self.format_lists: - c_dict[conf_list] = ( - [value.zfill(2).ljust(4 + len(value.zfill(2)), '0') - for value in c_dict[conf_list]] - ) - - # read all field lists and check if they are all empty - c_dict['all_field_lists_empty'] = self.read_field_lists_from_config(c_dict) + # read all lists and check if field lists are all empty + c_dict['all_field_lists_empty'] = self.read_lists_from_config(c_dict) c_dict['VAR_LIST'] = parse_var_list(self.config) c_dict['MODEL_INFO_LIST'] = self.parse_model_info() if not c_dict['MODEL_LIST'] and c_dict['MODEL_INFO_LIST']: - self.logger.warning("MODEL_LIST was left blank, " - + "creating with MODELn information.") - for model_info in c_dict['MODEL_INFO_LIST']: - c_dict['MODEL_LIST'].append(model_info['name']) + self.logger.warning("MODEL_LIST was left blank, " + + "creating with MODELn information.") + for model_info in c_dict['MODEL_INFO_LIST']: + c_dict['MODEL_LIST'].append(model_info['name']) c_dict = self.set_lists_loop_or_group(c_dict) @@ -225,6 +207,56 @@ def create_c_dict(self): return self.c_dict_error_check(c_dict) + def _format_conf_list(self, conf_list): + items = getlist( + self.config.getraw('config', conf_list, '') + ) + + # if list if empty or 
unset, check for {LIST_NAME} + if not items: + indices = list( + find_indices_in_config_section(fr'{conf_list}(\d+)$', + self.config, + index_index=1).keys() + ) + if indices: + items = [] + for index in indices: + sub_items = getlist( + self.config.getraw('config', f'{conf_list}{index}') + ) + if not sub_items: + continue + + items.append(','.join(sub_items)) + + # do not add quotes and format thresholds if threshold list + if 'THRESH' in conf_list: + return [self.format_thresh(item) for item in items] + + if conf_list in self.list_categories: + return items + + formatted_items = [] + for item in items: + sub_items = [] + for sub_item in item.split(','): + # if list in format lists, zero pad value to be at least 2 + # digits, then add zeros to make 6 digits + if conf_list in self.format_lists: + sub_item = self._format_hms(sub_item) + sub_items.append(sub_item) + + # format list as string with quotes around each item + sub_item_str = '", "'.join(sub_items) + formatted_items.append(f'"{sub_item_str}"') + + return formatted_items + + @staticmethod + def _format_hms(value): + return value.zfill(2).ljust(6, '0') + def c_dict_error_check(self, c_dict): if not c_dict.get('CONFIG_FILE'): @@ -273,25 +305,25 @@ def c_dict_error_check(self, c_dict): return c_dict - def read_field_lists_from_config(self, field_dict): - """! Get field list configuration variables and add to dictionary - @param field_dict dictionary to hold output values - @returns True if all lists are empty or False if any have a value""" + def read_lists_from_config(self, c_dict): + """! 
Get list configuration variables and add to dictionary + + @param c_dict dictionary to hold output values + @returns True if all field lists are empty or False if any are set + """ all_empty = True - for field_list in self.field_lists: - if 'LEVEL_LIST' in field_list: - field_dict[field_list] = ( - self.get_level_list(field_list.split('_')[0]) + + all_lists_to_read = self.expected_config_lists + self.list_categories + for conf_list in all_lists_to_read: + if 'LEVEL_LIST' in conf_list: + c_dict[conf_list] = ( + self.get_level_list(conf_list.split('_')[0]) ) else: - field_dict[field_list] = getlist( - self.config.getstr('config', - field_list, - '') - ) + c_dict[conf_list] = self._format_conf_list(conf_list) - # keep track if any list is not empty - if field_dict[field_list]: + # keep track if any field list is not empty + if conf_list in self.field_lists and c_dict[conf_list]: all_empty = False return all_empty @@ -367,17 +399,17 @@ def set_lists_loop_or_group(self, c_dict): return c_dict - def format_thresh(self, thresh): + def format_thresh(self, thresh_str): """! Format thresholds for file naming Args: - @param thresh string of the thresholds. Can be a comma-separated list, i.e. gt3,<=5.5, ==7 + @param thresh_str string of the thresholds. Can be a comma-separated list, i.e. gt3,<=5.5, ==7 @returns string of comma-separated list of the threshold(s) with letter format, i.e. gt3, le5.5, eq7 """ formatted_thresh_list = [] # separate thresholds by comma and strip off whitespace around values - thresh_list = [thresh.strip() for thresh in thresh.split(',')] + thresh_list = [thresh.strip() for thresh in thresh_str.split(',')] for thresh in thresh_list: if not thresh: continue @@ -410,6 +442,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): date_type = self.c_dict['DATE_TYPE'] stringsub_dict_keys = [] + # TODO: combine these 2 for loops? 
for loop_list in lists_to_loop: list_name = loop_list.replace('_LIST', '') stringsub_dict_keys.append(list_name.lower()) @@ -565,12 +598,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): list_name_value = ( config_dict[list_name].replace('"', '').replace(' ', '') ) - # CHANGE: format thresh when it is read instead of here -# if 'THRESH' in list_name: -# stringsub_dict[list_name.lower()] = self.format_thresh( -# list_name_value -# ) -# elif list_name == 'MODEL': + if list_name == 'MODEL': stringsub_dict[list_name.lower()] = list_name_value stringsub_dict['obtype'] = ( @@ -620,7 +648,9 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict['init_hour'] = ( stringsub_dict['init_hour_end'] ) - elif 'LEAD' in list_name: + # if multiple leads are specified, do not format lead info + # this behavior is the same as if lead list is in group lists + elif 'LEAD' in list_name and len(list_name_value.split(',')) == 1: lead_timedelta = datetime.timedelta( hours=int(list_name_value[:-4]), minutes=int(list_name_value[-4:-2]), @@ -670,16 +700,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict[list_name].replace('"', '').replace(' ', '') .replace(',', '_').replace('*', 'ALL') ) - if 'THRESH' in list_name: - - thresh_letter = self.format_thresh( - config_dict[list_name] - ) - - stringsub_dict[list_name.lower()] = ( - thresh_letter.replace(',', '_').replace('*', 'ALL') - ) - elif 'HOUR' in list_name: + if 'HOUR' in list_name: list_name_values_list = ( config_dict[list_name].replace('"', '').split(', ') ) @@ -1133,7 +1154,7 @@ def get_level_list(self, data_type): level_list = [] level_input = getlist( - self.config.getstr('config', f'{data_type}_LEVEL_LIST', '') + self.config.getraw('config', f'{data_type}_LEVEL_LIST', '') ) for level in level_input: @@ -1237,23 +1258,14 @@ def get_runtime_settings(self, c_dict): # and its value as a string for group lists. 
for group_list in group_lists: runtime_setup_dict_name = group_list.replace('_LIST', '') - add_quotes = False if 'THRESH' in group_list else True - - formatted_list = c_dict[group_list] - runtime_setup_dict[runtime_setup_dict_name] = ( - [self.list_to_str(formatted_list, - add_quotes=add_quotes)] - ) + runtime_setup_dict[runtime_setup_dict_name] = [ + ', '.join(c_dict[group_list]) + ] # Fill setup dictionary for MET config variable name - # and its value as a list for loop lists. Some items - # in lists need to be formatted now, others done later. + # and its value as a list for loop lists. for loop_list in loop_lists: - # if not a threshold list, add quotes around each value in list - if 'THRESH' not in loop_list: - c_dict[loop_list] = [f'"{value}"' for value in c_dict[loop_list]] - runtime_setup_dict_name = loop_list.replace('_LIST', '') runtime_setup_dict[runtime_setup_dict_name] = ( c_dict[loop_list] @@ -1471,6 +1483,7 @@ def run_stat_analysis(self): """ runtime_settings_dict_list = self.get_runtime_settings_dict_list() if not runtime_settings_dict_list: + self.log_error('Could not get runtime settings dict list') return False self.run_stat_analysis_job(runtime_settings_dict_list) From e644194b29798e7aea00aaa8d09cecdb289663ba Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 10:32:51 -0600 Subject: [PATCH 19/92] per #1842, add quotation marks around name/level/units that were read from the FCST/OBS_VAR variables and around level values read from FCST/OBS_LEVEL_LIST, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 633a7bf368..04db291056 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1162,7 +1162,7 @@ def get_level_list(self, data_type): level = 
f'{remove_quotes(level)}' level_list.append(level) - return level_list + return [f'"{item}"' for item in level_list] def process_job_args(self, job_type, job, model_info, lists_to_loop_items, lists_to_group_items, runtime_settings_dict): @@ -1344,16 +1344,16 @@ def get_c_dict_list(self): c_dict = {} c_dict['index'] = var_info['index'] c_dict['FCST_VAR_LIST'] = [ - var_info['fcst_name'] + f'"{var_info["fcst_name"]}"' ] c_dict['OBS_VAR_LIST'] = [ - var_info['obs_name'] + f'"{var_info["obs_name"]}"' ] c_dict['FCST_LEVEL_LIST'] = [ - var_info['fcst_level'] + f'"{var_info["fcst_level"]}"' ] c_dict['OBS_LEVEL_LIST'] = [ - var_info['obs_level'] + f'"{var_info["obs_level"]}"' ] c_dict['FCST_THRESH_LIST'] = [] @@ -1369,9 +1369,9 @@ def get_c_dict_list(self): c_dict['FCST_UNITS_LIST'] = [] c_dict['OBS_UNITS_LIST'] = [] if fcst_units: - c_dict['FCST_UNITS_LIST'].append(fcst_units) + c_dict['FCST_UNITS_LIST'].append(f'"{fcst_units}"') if obs_units: - c_dict['OBS_UNITS_LIST'].append(obs_units) + c_dict['OBS_UNITS_LIST'].append(f'"{obs_units}"') c_dict['run_fourier'] = run_fourier if pair: @@ -1391,8 +1391,7 @@ def add_other_lists_to_c_dict(self, c_dict): @param c_dict dictionary to add values to """ # add group and loop lists - lists_to_add = self.list_categories - for list_category in lists_to_add: + for list_category in self.list_categories: list_items = self.c_dict[list_category] if list_category not in c_dict: c_dict[list_category] = list_items From 3bc2e88b087b96a384b22eedf63eb4dafd58f887 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 10:58:08 -0600 Subject: [PATCH 20/92] fixed unit test to match change to get_level_list function that now adds quotation marks to format the values instead of handling that later in the processing --- .../wrappers/stat_analysis/test_stat_analysis.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git 
a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 0a5ac4f3aa..a082ac2abd 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -765,14 +765,14 @@ def test_run_stat_analysis(metplus_config): @pytest.mark.parametrize( 'data_type, config_list, expected_list', [ - ('FCST', '\"0,*,*\"', ["0,*,*"]), - ('FCST', '\"(0,*,*)\"', ["0,*,*"]), - ('FCST', '\"0,*,*\", \"1,*,*\"', ["0,*,*", "1,*,*"]), - ('FCST', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), - ('OBS', '\"0,*,*\"', ["0,*,*"]), - ('OBS', '\"(0,*,*)\"', ["0,*,*"]), - ('OBS', '\"0,*,*\", \"1,*,*\"', ["0,*,*", "1,*,*"]), - ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), + ('FCST', '\"0,*,*\"', ['"0,*,*"']), + ('FCST', '\"(0,*,*)\"', ['"0,*,*"']), + ('FCST', '\"0,*,*\", \"1,*,*\"', ['"0,*,*"', '"1,*,*"']), + ('FCST', '\"(0,*,*)\", \"(1,*,*)\"', ['"0,*,*"', '"1,*,*"']), + ('OBS', '\"0,*,*\"', ['"0,*,*"']), + ('OBS', '\"(0,*,*)\"', ['"0,*,*"']), + ('OBS', '\"0,*,*\", \"1,*,*\"', ['"0,*,*"', '"1,*,*"']), + ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ['"0,*,*"', '"1,*,*"']), ] ) @pytest.mark.wrapper_d From d97ceb97687d3292207e04509217f7e8aa18535e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 12:59:46 -0600 Subject: [PATCH 21/92] per #1842, change logic to handle padding HHMMSS values to still support HHHMMSS (3 digits of hour). 
Updated tests to handle example from removed configuration file in internal/tests/pytests/plotting --- .../pytests/wrappers/stat_analysis/test_stat_analysis.py | 5 +++-- metplus/wrappers/stat_analysis_wrapper.py | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index a082ac2abd..53bf25e1c1 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -32,16 +32,17 @@ def stat_analysis_wrapper(metplus_config): ('', []), ('0,1,2,3', ['"000000"', '"010000"', '"020000"', '"030000"']), ('01', ['"010000"']), - ('010000', ['"010000"']), ('begin_end_incr(0,3,1)', ['"000000"', '"010000"', '"020000"', '"030000"']), + ('24, 48, 72, 96, 120, 144, 168, 192, 216, 240', + ['"240000"', '"480000"', '"720000"', '"960000"', '"1200000"', + '"1440000"', '"1680000"', '"1920000"', '"2160000"', '"2400000"']), ] ) @pytest.mark.wrapper_d def test_handle_format_lists(metplus_config, input, expected_output): config = metplus_config([TEST_CONF]) config.set('config', 'FCST_LEAD_LIST', input) - config.set('config', 'LOOP_LIST_ITEMS', 'FCST_LEAD_LIST') wrapper = StatAnalysisWrapper(config) assert wrapper.c_dict['FCST_LEAD_LIST'] == expected_output diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 04db291056..36d075e622 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -255,7 +255,8 @@ def _format_conf_list(self, conf_list): @staticmethod def _format_hms(value): - return value.zfill(2).ljust(6, '0') + padded_value = value.zfill(2) + return padded_value.ljust(len(padded_value) + 4, '0') def c_dict_error_check(self, c_dict): @@ -399,7 +400,8 @@ def set_lists_loop_or_group(self, c_dict): return c_dict - def format_thresh(self, thresh_str): 
+ @staticmethod + def format_thresh(thresh_str): """! Format thresholds for file naming Args: From ce3a33c6fd6dd8565db9bfa51db119ac608d2949 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 14:53:15 -0600 Subject: [PATCH 22/92] clean up logic that loops over 2 lists and performs the same action on each, add comments to describe the intended behavior --- metplus/wrappers/stat_analysis_wrapper.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 36d075e622..a372237ef3 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -371,7 +371,8 @@ def set_lists_loop_or_group(self, c_dict): together) and lists_to_loop_items (list of all the list names whose items are being looped over) """ - # get list of config variables not found in either GROUP_LIST_ITEMS or LOOP_LIST_ITEMS + # get list of config variables not found in either + # GROUP_LIST_ITEMS or LOOP_LIST_ITEMS missing_config_list = [conf for conf in self.expected_config_lists if conf not in c_dict['GROUP_LIST_ITEMS']] missing_config_list = [conf for conf in missing_config_list @@ -380,8 +381,8 @@ def set_lists_loop_or_group(self, c_dict): if conf not in missing_config_list] # loop through lists not found in either loop or group lists + # add missing lists to group_lists for missing_config in missing_config_list: - c_dict['GROUP_LIST_ITEMS'].append(missing_config) # loop through lists found in either loop or group lists originally @@ -444,13 +445,10 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): date_type = self.c_dict['DATE_TYPE'] stringsub_dict_keys = [] - # TODO: combine these 2 for loops? 
- for loop_list in lists_to_loop: - list_name = loop_list.replace('_LIST', '') - stringsub_dict_keys.append(list_name.lower()) - for group_list in lists_to_group: - list_name = group_list.replace('_LIST', '') - stringsub_dict_keys.append(list_name.lower()) + # add all loop list and group list items to string sub keys list + for list_item in lists_to_loop + lists_to_group: + list_name = list_item.replace('_LIST', '').lower() + stringsub_dict_keys.append(list_name) special_keys = [ 'fcst_valid_hour_beg', 'fcst_valid_hour_end', From 4647b07e28490d48ab83a9924515434b720fdfbc Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 14:53:31 -0600 Subject: [PATCH 23/92] reordered functions to better follow flow of logic --- metplus/wrappers/stat_analysis_wrapper.py | 102 +++++++++++----------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index a372237ef3..572b4a12ef 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -207,57 +207,6 @@ def create_c_dict(self): return self.c_dict_error_check(c_dict) - def _format_conf_list(self, conf_list): - items = getlist( - self.config.getraw('config', conf_list, '') - ) - - # if list if empty or unset, check for {LIST_NAME} - if not items: - indices = list( - find_indices_in_config_section(fr'{conf_list}(\d+)$', - self.config, - index_index=1).keys() - ) - if indices: - items = [] - for index in indices: - sub_items = getlist( - self.config.getraw('config', f'{conf_list}{index}') - ) - if not sub_items: - continue - - items.append(','.join(sub_items)) - - # do not add quotes and format thresholds if threshold list - if 'THRESH' in conf_list: - return [self.format_thresh(item) for item in items] - - if conf_list in self.list_categories: - return items - - formatted_items = [] - for item in items: - sub_items = [] - for 
sub_item in item.split(','): - # if list in format lists, zero pad value to be at least 2 - # digits, then add zeros to make 6 digits - if conf_list in self.format_lists: - sub_item = self._format_hms(sub_item) - sub_items.append(sub_item) - - # format list as string with quotes around each item - sub_item_str = '", "'.join(sub_items) - formatted_items.append(f'"{sub_item_str}"') - - return formatted_items - - @staticmethod - def _format_hms(value): - padded_value = value.zfill(2) - return padded_value.ljust(len(padded_value) + 4, '0') - def c_dict_error_check(self, c_dict): if not c_dict.get('CONFIG_FILE'): @@ -329,6 +278,57 @@ def read_lists_from_config(self, c_dict): return all_empty + def _format_conf_list(self, conf_list): + items = getlist( + self.config.getraw('config', conf_list, '') + ) + + # if list if empty or unset, check for {LIST_NAME} + if not items: + indices = list( + find_indices_in_config_section(fr'{conf_list}(\d+)$', + self.config, + index_index=1).keys() + ) + if indices: + items = [] + for index in indices: + sub_items = getlist( + self.config.getraw('config', f'{conf_list}{index}') + ) + if not sub_items: + continue + + items.append(','.join(sub_items)) + + # do not add quotes and format thresholds if threshold list + if 'THRESH' in conf_list: + return [self.format_thresh(item) for item in items] + + if conf_list in self.list_categories: + return items + + formatted_items = [] + for item in items: + sub_items = [] + for sub_item in item.split(','): + # if list in format lists, zero pad value to be at least 2 + # digits, then add zeros to make 6 digits + if conf_list in self.format_lists: + sub_item = self._format_hms(sub_item) + sub_items.append(sub_item) + + # format list as string with quotes around each item + sub_item_str = '", "'.join(sub_items) + formatted_items.append(f'"{sub_item_str}"') + + return formatted_items + + @staticmethod + def _format_hms(value): + padded_value = value.zfill(2) + return padded_value.ljust(len(padded_value) 
+ 4, '0') + @staticmethod def list_to_str(list_of_values, add_quotes=True): """! Turn a list of values into a single string so it can be From d350c61036ef05d05e0dfec5adb8aea8268ab7e6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 14:58:13 -0600 Subject: [PATCH 24/92] moved list in function to class variable and capitalized all constant class variables --- metplus/wrappers/stat_analysis_wrapper.py | 69 ++++++++++++----------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 572b4a12ef..883d537356 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -67,7 +67,7 @@ class StatAnalysisWrapper(CommandBuilder): 'METPLUS_HSS_EC_VALUE', ] - field_lists = [ + FIELD_LISTS = [ 'FCST_VAR_LIST', 'OBS_VAR_LIST', 'FCST_UNITS_LIST', @@ -78,7 +78,7 @@ class StatAnalysisWrapper(CommandBuilder): 'OBS_LEVEL_LIST', ] - format_lists = [ + FORMAT_LISTS = [ 'FCST_VALID_HOUR_LIST', 'FCST_INIT_HOUR_LIST', 'OBS_VALID_HOUR_LIST', @@ -87,7 +87,7 @@ class StatAnalysisWrapper(CommandBuilder): 'OBS_LEAD_LIST', ] - expected_config_lists = [ + EXPECTED_CONFIG_LISTS = [ 'MODEL_LIST', 'DESC_LIST', 'VX_MASK_LIST', @@ -96,9 +96,29 @@ class StatAnalysisWrapper(CommandBuilder): 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST', - ] + format_lists + field_lists - - list_categories = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS'] + ] + FORMAT_LISTS + FIELD_LISTS + + LIST_CATEGORIES = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS'] + + STRING_SUB_SPECIAL_KEYS = [ + 'fcst_valid_hour_beg', 'fcst_valid_hour_end', + 'fcst_init_hour_beg', 'fcst_init_hour_end', + 'obs_valid_hour_beg', 'obs_valid_hour_end', + 'obs_init_hour_beg', 'obs_init_hour_end', + 'valid_hour', 'valid_hour_beg', 'valid_hour_end', + 'init_hour', 'init_hour_beg', 'init_hour_end', + 'fcst_valid', 'fcst_valid_beg', 'fcst_valid_end', + 
'fcst_init', 'fcst_init_beg', 'fcst_init_end', + 'obs_valid', 'obs_valid_beg', 'obs_valid_end', + 'obs_init', 'obs_init_beg', 'obs_init_end', + 'valid', 'valid_beg', 'valid_end', + 'init', 'init_beg', 'init_end', + 'fcst_lead_hour', 'fcst_lead_min', + 'fcst_lead_sec', 'fcst_lead_totalsec', + 'obs_lead_hour', 'obs_lead_min', + 'obs_lead_sec', 'obs_lead_totalsec', + 'lead', 'lead_hour', 'lead_min', 'lead_sec', 'lead_totalsec' + ] def __init__(self, config, instance=None): self.app_path = os.path.join(config.getdir('MET_BIN_DIR', ''), @@ -230,7 +250,7 @@ def c_dict_error_check(self, c_dict): "Must set at least one job with STAT_ANALYSIS_JOB" ) - for conf_list in self.list_categories: + for conf_list in self.LIST_CATEGORIES: if not c_dict[conf_list]: self.log_error(f"Must set {conf_list} to run StatAnalysis") @@ -263,7 +283,7 @@ def read_lists_from_config(self, c_dict): """ all_empty = True - all_lists_to_read = self.expected_config_lists + self.list_categories + all_lists_to_read = self.EXPECTED_CONFIG_LISTS + self.LIST_CATEGORIES for conf_list in all_lists_to_read: if 'LEVEL_LIST' in conf_list: c_dict[conf_list] = ( @@ -273,7 +293,7 @@ def read_lists_from_config(self, c_dict): c_dict[conf_list] = self._format_conf_list(conf_list) # keep track if any field list is not empty - if conf_list in self.field_lists and c_dict[conf_list]: + if conf_list in self.FIELD_LISTS and c_dict[conf_list]: all_empty = False return all_empty @@ -305,7 +325,7 @@ def _format_conf_list(self, conf_list): if 'THRESH' in conf_list: return [self.format_thresh(item) for item in items] - if conf_list in self.list_categories: + if conf_list in self.LIST_CATEGORIES: return items formatted_items = [] @@ -314,7 +334,7 @@ def _format_conf_list(self, conf_list): for sub_item in item.split(','): # if list in format lists, zero pad value to be at least 2 # digits, then add zeros to make 6 digits - if conf_list in self.format_lists: + if conf_list in self.FORMAT_LISTS: sub_item = 
self._format_hms(sub_item) sub_items.append(sub_item) @@ -373,11 +393,11 @@ def set_lists_loop_or_group(self, c_dict): """ # get list of config variables not found in either # GROUP_LIST_ITEMS or LOOP_LIST_ITEMS - missing_config_list = [conf for conf in self.expected_config_lists + missing_config_list = [conf for conf in self.EXPECTED_CONFIG_LISTS if conf not in c_dict['GROUP_LIST_ITEMS']] missing_config_list = [conf for conf in missing_config_list if conf not in c_dict['LOOP_LIST_ITEMS']] - found_config_list = [conf for conf in self.expected_config_lists + found_config_list = [conf for conf in self.EXPECTED_CONFIG_LISTS if conf not in missing_config_list] # loop through lists not found in either loop or group lists @@ -450,27 +470,8 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): list_name = list_item.replace('_LIST', '').lower() stringsub_dict_keys.append(list_name) - special_keys = [ - 'fcst_valid_hour_beg', 'fcst_valid_hour_end', - 'fcst_init_hour_beg', 'fcst_init_hour_end', - 'obs_valid_hour_beg', 'obs_valid_hour_end', - 'obs_init_hour_beg', 'obs_init_hour_end', - 'valid_hour', 'valid_hour_beg', 'valid_hour_end', - 'init_hour', 'init_hour_beg', 'init_hour_end', - 'fcst_valid', 'fcst_valid_beg', 'fcst_valid_end', - 'fcst_init', 'fcst_init_beg', 'fcst_init_end', - 'obs_valid', 'obs_valid_beg', 'obs_valid_end', - 'obs_init', 'obs_init_beg', 'obs_init_end', - 'valid', 'valid_beg', 'valid_end', - 'init', 'init_beg', 'init_end', - 'fcst_lead_hour', 'fcst_lead_min', - 'fcst_lead_sec', 'fcst_lead_totalsec', - 'obs_lead_hour', 'obs_lead_min', - 'obs_lead_sec', 'obs_lead_totalsec', - 'lead', 'lead_hour', 'lead_min', 'lead_sec', 'lead_totalsec' - ] # create a dictionary of empty string values from the special keys - for special_key in special_keys: + for special_key in self.STRING_SUB_SPECIAL_KEYS: stringsub_dict_keys.append(special_key) stringsub_dict = dict.fromkeys(stringsub_dict_keys, '') @@ -1391,7 +1392,7 @@ def 
add_other_lists_to_c_dict(self, c_dict): @param c_dict dictionary to add values to """ # add group and loop lists - for list_category in self.list_categories: + for list_category in self.LIST_CATEGORIES: list_items = self.c_dict[list_category] if list_category not in c_dict: c_dict[list_category] = list_items From 2097047a707e0c94b8265f0e772ed6c0bb64073e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 15:16:42 -0600 Subject: [PATCH 25/92] clean up formatting of test code --- metplus/wrappers/stat_analysis_wrapper.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 883d537356..5863801a9f 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -372,6 +372,15 @@ def list_to_str(list_of_values, add_quotes=True): return ', '.join(list_of_values) + @staticmethod + def str_to_list(string_value, sort_list=False): + # remove double quotes and split by comma + str_list = string_value.replace('"', '').split(',') + str_list = [item.strip() for item in str_list] + if sort_list: + str_list.sort() + return str_list + def set_lists_loop_or_group(self, c_dict): """! 
Determine whether the lists from the METplus config file should treat the items in that list as a group or items @@ -479,12 +488,11 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): fcst_hour_list = config_dict['FCST_'+date_type+'_HOUR'] obs_hour_list = config_dict['OBS_' + date_type + '_HOUR'] if fcst_hour_list: - fcst_hour_list = [fhr.strip() for fhr in fcst_hour_list.replace('"', '').split(',')] + fcst_hour_list = self.str_to_list(fcst_hour_list, sort_list=True) if obs_hour_list: - obs_hour_list = [fhr.strip() for fhr in obs_hour_list.replace('"', '').split(',')] + obs_hour_list = self.str_to_list(obs_hour_list, sort_list=True) # if fcst hour list is set, set fcst_{data_type}_beg/end with first and last values - # TODO: values should be sorted first if fcst_hour_list: stringsub_dict['fcst_'+date_type.lower()+'_beg'] = ( datetime.datetime.strptime( From 504b9900e1fa04aa2d4353ab58ce89d80eb8e190 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 15:17:30 -0600 Subject: [PATCH 26/92] clean up formatting of test code --- .../stat_analysis/test_stat_analysis.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 53bf25e1c1..695848a897 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -127,18 +127,18 @@ def test_set_lists_as_loop_or_group(metplus_config): # and those not set are set to GROUP_LIST_ITEMS st = stat_analysis_wrapper(metplus_config) # Test 1 - expected_lists_to_group_items = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'FCST_LEAD_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', - 'OBS_VAR_LIST', 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', 
'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - expected_lists_to_loop_items = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + expected_lists_to_group_items = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', + 'FCST_LEAD_LIST', 'OBS_LEAD_LIST', + 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', + 'OBS_VAR_LIST', 'FCST_UNITS_LIST', + 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', + 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', + 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', + 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + expected_lists_to_loop_items = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] config_dict = {} config_dict['LOOP_ORDER'] = 'times' config_dict['PROCESS_LIST'] = 'StatAnalysis' @@ -146,8 +146,8 @@ def test_set_lists_as_loop_or_group(metplus_config): 'PARM_BASE/grid_to_grid/met_config/STATAnalysisConfig' ) config_dict['OUTPUT_DIR'] = 'OUTPUT_BASE/stat_analysis' - config_dict['GROUP_LIST_ITEMS'] = [ 'FCST_INIT_HOUR_LIST' ] - config_dict['LOOP_LIST_ITEMS'] = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST'] + config_dict['GROUP_LIST_ITEMS'] = ['FCST_INIT_HOUR_LIST'] + config_dict['LOOP_LIST_ITEMS'] = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] config_dict['FCST_VAR_LIST'] = [] config_dict['OBS_VAR_LIST'] = [] config_dict['FCST_LEVEL_LIST'] = [] @@ -156,12 +156,12 @@ def test_set_lists_as_loop_or_group(metplus_config): config_dict['OBS_UNITS_LIST'] = [] config_dict['FCST_THRESH_LIST'] = [] config_dict['OBS_THRESH_LIST'] = [] - config_dict['MODEL_LIST'] = [ 'MODEL_TEST' ] + config_dict['MODEL_LIST'] = ['MODEL_TEST'] config_dict['DESC_LIST'] = [] config_dict['FCST_LEAD_LIST'] = [] config_dict['OBS_LEAD_LIST'] = [] - config_dict['FCST_VALID_HOUR_LIST'] = [ '00', '06', '12', '18'] - config_dict['FCST_INIT_HOUR_LIST'] = [ '00', '06', '12', '18'] + config_dict['FCST_VALID_HOUR_LIST'] = ['00', '06', '12', '18'] + 
config_dict['FCST_INIT_HOUR_LIST'] = ['00', '06', '12', '18'] config_dict['OBS_VALID_HOUR_LIST'] = [] config_dict['OBS_INIT_HOUR_LIST'] = [] config_dict['VX_MASK_LIST'] = [] From 86ac6d9cd89f6bba7f8795f3c4691866de4bbf89 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 3 Oct 2022 17:12:47 -0600 Subject: [PATCH 27/92] per #1842, moved logic to set fcst/obs beg/end string template sub values into helper function to clean up logic and make it more readable, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 223 ++++++++++------------ 1 file changed, 99 insertions(+), 124 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 5863801a9f..5c54689c07 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -469,141 +469,27 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): information to pass to the string_template_substitution """ - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] date_type = self.c_dict['DATE_TYPE'] - stringsub_dict_keys = [] + stringsub_dict = {} # add all loop list and group list items to string sub keys list for list_item in lists_to_loop + lists_to_group: list_name = list_item.replace('_LIST', '').lower() - stringsub_dict_keys.append(list_name) + stringsub_dict[list_name] = '' # create a dictionary of empty string values from the special keys for special_key in self.STRING_SUB_SPECIAL_KEYS: - stringsub_dict_keys.append(special_key) - stringsub_dict = dict.fromkeys(stringsub_dict_keys, '') - - # Set full date information - fcst_hour_list = config_dict['FCST_'+date_type+'_HOUR'] - obs_hour_list = config_dict['OBS_' + date_type + '_HOUR'] - if fcst_hour_list: - fcst_hour_list = self.str_to_list(fcst_hour_list, sort_list=True) - if obs_hour_list: - obs_hour_list = self.str_to_list(obs_hour_list, sort_list=True) - - # if fcst hour 
list is set, set fcst_{data_type}_beg/end with first and last values - if fcst_hour_list: - stringsub_dict['fcst_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+fcst_hour_list[0], '%Y%m%d%H%M%S' - ) - ) - stringsub_dict['fcst_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_end+fcst_hour_list[-1], '%Y%m%d%H%M%S' - ) - ) - if (stringsub_dict['fcst_'+date_type.lower()+'_beg'] - == stringsub_dict['fcst_'+date_type.lower()+'_end']): - stringsub_dict['fcst_'+date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hour list is not set, use date beg 000000-235959 as fcst_{date_type}_beg/end - #TODO: should be date beg 000000 and date end 235959? - else: - stringsub_dict['fcst_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+'000000', '%Y%m%d%H%M%S' - ) - ) - stringsub_dict['fcst_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_beg+'235959', '%Y%m%d%H%M%S' - ) - ) - # if obs hour list is set, set obs_{data_type}_beg/end with first and last values - # TODO: values should be sorted first - # TODO: this could be made into function to handle fcst and obs - if obs_hour_list: - stringsub_dict['obs_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+obs_hour_list[0], '%Y%m%d%H%M%S' - ) - ) - stringsub_dict['obs_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_end+obs_hour_list[-1], '%Y%m%d%H%M%S' - ) - ) - if (stringsub_dict['obs_'+date_type.lower()+'_beg'] - == stringsub_dict['obs_'+date_type.lower()+'_end']): - stringsub_dict['obs_'+date_type.lower()] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] - ) - # if obs hour list is not set, use date beg 000000-235959 as obs_{date_type}_beg/end - #TODO: should be date beg 000000 and date end 235959? 
- else: - stringsub_dict['obs_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+'000000', '%Y%m%d%H%M%S' - ) - ) - stringsub_dict['obs_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_beg+'235959', '%Y%m%d%H%M%S' - ) - ) - # if fcst and obs hour lists the same, set {date_type}_beg/end to fcst_{date_type}_beg/end - if fcst_hour_list == obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_end'] - ) - # if {date_type} beg and end are the same, set {date_type} - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hr list is not set but obs hr list is, set {date_type}_beg/end to fcst_{date_type}_beg/end - # TODO: should be elif? - if fcst_hour_list and not obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_end'] - ) - # if {date_type} beg and end are the same, set {date_type} (same as above) - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hr list is set but obs hr list is not, set {date_type}_beg/end to obs_{date_type}_beg/end - # TODO: should be elif? 
- if not fcst_hour_list and obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] - ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['obs_'+date_type.lower()+'_end'] - ) - # if {date_type} beg and end are the same, set {date_type} (same as above twice) - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] - ) - # if neither fcst or obs hr list are set, {date_type}_beg/end are not set at all (empty string) - # also {date_type} is not set + stringsub_dict[special_key] = '' + + # Set string sub info from fcst/obs hour lists + self._set_stringsub_hours(stringsub_dict, + config_dict[f'FCST_{date_type}_HOUR'], + config_dict[f'OBS_{date_type}_HOUR']) # Set loop information for loop_list in lists_to_loop: list_name = loop_list.replace('_LIST', '') + # TODO: change commas to underscores if loop item contains a list list_name_value = ( config_dict[list_name].replace('"', '').replace(' ', '') ) @@ -808,7 +694,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): else: stringsub_dict[list_name.lower()] = list_name_value - nkeys_end = len(stringsub_dict_keys) + #nkeys_end = len(stringsub_dict_keys) # Some lines for debugging if needed in future #self.logger.info(nkeys_start) #self.logger.info(nkeys_end) @@ -816,6 +702,95 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): # self.logger.info("{} ({})".format(key, value)) return stringsub_dict + def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): + """! Set string sub dictionary _beg and _end values for fcst and obs. + Set other values depending on values set in fcst and obs hour lists. + Values that are set depend on what it set in c_dict DATE_TYPE, which + is either INIT or VALID. 
+ + @param sub_dict dictionary to set string sub values + @param fcst_hour_str string with list of forecast hours to process + @param obs_hour_str string with list of observation hours to process + """ + date_type = self.c_dict['DATE_TYPE'].lower() + if fcst_hour_str: + fcst_hour_list = self.str_to_list(fcst_hour_str, sort_list=True) + else: + fcst_hour_list = None + + if obs_hour_str: + obs_hour_list = self.str_to_list(obs_hour_str, sort_list=True) + else: + obs_hour_list = None + + self._set_stringsub_hours_item(sub_dict, 'fcst', fcst_hour_list) + self._set_stringsub_hours_item(sub_dict, 'obs', obs_hour_list) + + # if fcst and obs hour lists the same or if fcst is set but not obs, + # set {date_type}_beg/end to fcst_{date_type}_beg/end + if (fcst_hour_list == obs_hour_list or + (fcst_hour_list and not obs_hour_list)): + sub_dict[f'{date_type}_beg'] = sub_dict[f'fcst_{date_type}_beg'] + sub_dict[f'{date_type}_end'] = sub_dict[f'fcst_{date_type}_end'] + + # if {date_type} beg and end are the same, set {date_type} + if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']: + sub_dict[date_type] = sub_dict[f'{date_type}_end'] + + # if fcst hr list is set but obs hr list is not, + # set {date_type}_beg/end to obs_{date_type}_beg/end + elif not fcst_hour_list and obs_hour_list: + sub_dict[f'{date_type}_beg'] = sub_dict[f'obs_{date_type}_beg'] + sub_dict[f'{date_type}_end'] = sub_dict[f'obs_{date_type}_end'] + + # if {date_type} beg and end are the same, set {date_type} + if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']: + sub_dict[date_type] = sub_dict[f'{date_type}_beg'] + + # if neither fcst or obs hr list are set, + # {date_type}_beg/end and {date_type} are not set at all (empty string) + + def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): + """! Set either fcst or obs values in string sub dictionary. 
+ + @param sub_dict dictionary to set string sub values + @param fcst_or_obs string to note processing either fcst or obs + @param hour_list list of fcst or obs hours + """ + date_beg = self.c_dict['DATE_BEG'] + date_end = self.c_dict['DATE_END'] + prefix = f"{fcst_or_obs}_{self.c_dict['DATE_TYPE'].lower()}" + + if hour_list: + sub_dict[f'{prefix}_beg'] = ( + datetime.datetime.strptime( + date_beg + hour_list[0], '%Y%m%d%H%M%S' + ) + ) + sub_dict[f'{prefix}_end'] = ( + datetime.datetime.strptime( + date_end + hour_list[-1], '%Y%m%d%H%M%S' + ) + ) + if sub_dict[f'{prefix}_beg'] == sub_dict[f'{prefix}_end']: + sub_dict[prefix] = sub_dict[f'{prefix}_beg'] + + return + + # if fcst hour list is not set, use date beg 000000-235959 as + # fcst_{date_type}_beg/end + # TODO: should be date beg 000000 and date end 235959? + sub_dict[f'{prefix}_beg'] = ( + datetime.datetime.strptime( + date_beg + '000000', '%Y%m%d%H%M%S' + ) + ) + sub_dict[f'{prefix}_end'] = ( + datetime.datetime.strptime( + date_beg + '235959', '%Y%m%d%H%M%S' + ) + ) + def get_output_filename(self, output_type, filename_template, filename_type, lists_to_loop, lists_to_group, config_dict): From 92b2def45718f3ac4fd3e35d97b05f8a29dbe5db Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 4 Oct 2022 17:22:56 -0600 Subject: [PATCH 28/92] refactored wrapper to be more readable, use relativedelta for hour/lead offsets to be consistent with other wrappers, ci-run-diff --- .../stat_analysis/test_stat_analysis.py | 136 +++++++++--------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 695848a897..9e06d2a4b8 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -4,6 +4,7 @@ import os import datetime +from 
dateutil.relativedelta import relativedelta from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper from metplus.util import handle_tmp_dir @@ -26,25 +27,24 @@ def stat_analysis_wrapper(metplus_config): handle_tmp_dir(config) return StatAnalysisWrapper(config) - @pytest.mark.parametrize( - 'input, expected_output', [ + 'input_str, expected_output', [ ('', []), - ('0,1,2,3', ['"000000"', '"010000"', '"020000"', '"030000"']), - ('01', ['"010000"']), - ('begin_end_incr(0,3,1)', ['"000000"', '"010000"', - '"020000"', '"030000"']), + ('0,1,2,3', ['000000', '010000', '020000', '030000']), + ('12, 24', ['120000', '240000']), + ('196', ['1960000']), + ('12H, 24H', ['120000', '240000']), + ('45M', ['004500']), + ('42S', ['000042']), ('24, 48, 72, 96, 120, 144, 168, 192, 216, 240', - ['"240000"', '"480000"', '"720000"', '"960000"', '"1200000"', - '"1440000"', '"1680000"', '"1920000"', '"2160000"', '"2400000"']), + ['240000', '480000', '720000', '960000', '1200000', + '1440000', '1680000', '1920000', '2160000', '2400000']), ] ) @pytest.mark.wrapper_d -def test_handle_format_lists(metplus_config, input, expected_output): - config = metplus_config([TEST_CONF]) - config.set('config', 'FCST_LEAD_LIST', input) - wrapper = StatAnalysisWrapper(config) - assert wrapper.c_dict['FCST_LEAD_LIST'] == expected_output +def test_get_met_time_list(metplus_config, input_str, expected_output): + wrapper = stat_analysis_wrapper(metplus_config) + assert wrapper._get_met_time_list(input_str) == expected_output @pytest.mark.wrapper_d @@ -89,9 +89,8 @@ def test_create_c_dict(metplus_config): assert c_dict['DESC_LIST'] == [] assert c_dict['FCST_LEAD_LIST'] == [] assert c_dict['OBS_LEAD_LIST'] == [] - assert c_dict['FCST_VALID_HOUR_LIST'] == ['"000000"'] - assert c_dict['FCST_INIT_HOUR_LIST'] == ['"000000"', '"060000"', - '"120000"', '"180000"'] + assert c_dict['FCST_VALID_HOUR_LIST'] == ['00'] + assert c_dict['FCST_INIT_HOUR_LIST'] == ['00', '06', '12', '18'] assert 
c_dict['OBS_VALID_HOUR_LIST'] == [] assert c_dict['OBS_INIT_HOUR_LIST'] == [] assert c_dict['VX_MASK_LIST'] == [] @@ -222,7 +221,7 @@ def test_build_stringsub_dict(metplus_config): config_dict['OBS_UNITS'] = '' config_dict['FCST_THRESH'] = '' config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' config_dict['INTERP_PNTS'] = '' config_dict['FCST_LEAD'] = '' config_dict['LINE_TYPE'] = '' @@ -253,41 +252,41 @@ def test_build_stringsub_dict(metplus_config): datetime.datetime(2019, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['valid_end'] == datetime.datetime(2019, 1, 5, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour_end'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['fcst_valid_hour'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['fcst_valid_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['fcst_valid_hour_end'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['fcst_valid_beg'] == datetime.datetime(2019, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['fcst_valid_end'] == datetime.datetime(2019, 1, 5, 0, 0, 0)) - assert(test_stringsub_dict['valid_hour'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_hour_end'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['valid_hour'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['valid_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert 
test_stringsub_dict['valid_hour_end'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['model'] == 'MODEL_TEST') assert(test_stringsub_dict['obtype'] == 'MODEL_TEST_ANL') assert(test_stringsub_dict['fcst_init_hour'] == '000000_060000_120000_180000') - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) + assert test_stringsub_dict['fcst_init_hour_beg'] == relativedelta(hours=0) + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['fcst_init_hour_end'] == relativedelta(hours=18) + #datetime.datetime(1900, 1, 1, 18, 0, 0)) + assert test_stringsub_dict['init_hour_beg'] == relativedelta(hours=0) + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['init_hour_end'] == relativedelta(hours=18) + #datetime.datetime(1900, 1, 1, 18, 0, 0)) assert(test_stringsub_dict['fcst_var'] == '') assert(test_stringsub_dict['fcst_level'] == '') assert(test_stringsub_dict['fcst_units'] == '') assert(test_stringsub_dict['fcst_thresh'] == '') assert(test_stringsub_dict['desc'] == '') # Test 2 - config_dict['FCST_LEAD'] = '240000' + config_dict['FCST_LEAD'] = '24' st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' @@ -317,7 +316,7 @@ def test_build_stringsub_dict(metplus_config): assert(test_stringsub_dict['lead_sec'] == '00') assert(test_stringsub_dict['lead'] == '240000') # Test 3 - config_dict['FCST_LEAD'] = '1200000' + config_dict['FCST_LEAD'] = '120' st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' @@ -352,18 +351,18 @@ def test_build_stringsub_dict(metplus_config): st.c_dict['DATE_TYPE'] = 
'INIT' test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['fcst_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_end'] == + assert test_stringsub_dict['fcst_init_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['fcst_init_hour_end'] == relativedelta(hours=18) + #datetime.datetime(1900, 1, 1, 18, 0, 0)) + assert (test_stringsub_dict['fcst_init_beg'] == + datetime.datetime(2019, 1, 1, 0, 0, 0)) + assert (test_stringsub_dict['fcst_init_end'] == datetime.datetime(2019, 1, 5, 18, 0, 0)) - assert(test_stringsub_dict['init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) + assert test_stringsub_dict['init_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert test_stringsub_dict['init_hour_end'] == relativedelta(hours=18) + #datetime.datetime(1900, 1, 1, 18, 0, 0)) assert(test_stringsub_dict['init_beg'] == datetime.datetime(2019, 1, 1, 0, 0, 0)) assert(test_stringsub_dict['init_end'] == @@ -397,14 +396,15 @@ def test_build_stringsub_dict(metplus_config): datetime.datetime(2019, 1, 1, 0, 0 ,0)) assert(test_stringsub_dict['obs_init_end'] == datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['obs_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['obs_init_hour_end'] == - datetime.datetime(1900, 1, 1, 23, 59 ,59)) + assert 
test_stringsub_dict['fcst_init_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert (test_stringsub_dict['fcst_init_hour_end'] == + relativedelta(hours=23, minutes=59, seconds=59)) + #datetime.datetime(1900, 1, 1, 23, 59 ,59)) + assert test_stringsub_dict['obs_init_hour_beg'] == relativedelta() + #datetime.datetime(1900, 1, 1, 0, 0, 0)) + assert (test_stringsub_dict['obs_init_hour_end'] == + relativedelta(hours=23, minutes=59, seconds=59)) @pytest.mark.wrapper_d @@ -416,7 +416,7 @@ def test_get_output_filename(metplus_config): # as expected st = stat_analysis_wrapper(metplus_config) config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' + config_dict['FCST_VALID_HOUR'] = '0' config_dict['FCST_VAR'] = '' config_dict['FCST_LEVEL'] = '' config_dict['INTERP_MTHD'] = '' @@ -427,7 +427,7 @@ def test_get_output_filename(metplus_config): config_dict['OBS_UNITS'] = '' config_dict['FCST_THRESH'] = '' config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' config_dict['INTERP_PNTS'] = '' config_dict['FCST_LEAD'] = '' config_dict['LINE_TYPE'] = '' @@ -535,7 +535,7 @@ def test_get_lookin_dir(metplus_config): # as expected st = stat_analysis_wrapper(metplus_config) config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' + config_dict['FCST_VALID_HOUR'] = '0' config_dict['FCST_VAR'] = '' config_dict['FCST_LEVEL'] = '' config_dict['INTERP_MTHD'] = '' @@ -546,7 +546,7 @@ def test_get_lookin_dir(metplus_config): config_dict['OBS_UNITS'] = '' config_dict['FCST_THRESH'] = '' config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' config_dict['INTERP_PNTS'] = '' config_dict['FCST_LEAD'] = '' config_dict['LINE_TYPE'] = '' @@ -622,8 +622,8 @@ def test_format_valid_init(metplus_config): st.c_dict['DATE_TYPE'] = 'VALID' config_dict = {} - 
config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_INIT_HOUR'] = '"000000", "120000"' + config_dict['FCST_VALID_HOUR'] = '0' + config_dict['FCST_INIT_HOUR'] = '0, 12' config_dict['OBS_VALID_HOUR'] = '' config_dict['OBS_INIT_HOUR'] = '' config_dict = st.format_valid_init(config_dict) @@ -645,8 +645,8 @@ def test_format_valid_init(metplus_config): st.c_dict['DATE_TYPE'] = 'VALID' config_dict = {} - config_dict['FCST_VALID_HOUR'] = '"000000", "120000"' - config_dict['FCST_INIT_HOUR'] = '"000000", "120000"' + config_dict['FCST_VALID_HOUR'] = '0, 12' + config_dict['FCST_INIT_HOUR'] = '0, 12' config_dict['OBS_VALID_HOUR'] = '' config_dict['OBS_INIT_HOUR'] = '' config_dict = st.format_valid_init(config_dict) @@ -671,7 +671,7 @@ def test_format_valid_init(metplus_config): config_dict['FCST_VALID_HOUR'] = '' config_dict['FCST_INIT_HOUR'] = '' config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' + config_dict['OBS_INIT_HOUR'] = '0, 12' config_dict = st.format_valid_init(config_dict) assert config_dict['FCST_VALID_BEG'] == '' assert config_dict['FCST_VALID_END'] == '' @@ -694,7 +694,7 @@ def test_format_valid_init(metplus_config): config_dict['FCST_VALID_HOUR'] = '' config_dict['FCST_INIT_HOUR'] = '' config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' + config_dict['OBS_INIT_HOUR'] = '0, 12' config_dict = st.format_valid_init(config_dict) assert config_dict['FCST_VALID_BEG'] == '' assert config_dict['FCST_VALID_END'] == '' From 6238de4b5048aa7c64f866c5bb764e36ba14693c Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 4 Oct 2022 17:23:09 -0600 Subject: [PATCH 29/92] refactored wrapper to be more readable, use relativedelta for hour/lead offsets to be consistent with other wrappers, ci-run-diff --- metplus/util/time_util.py | 9 +- metplus/wrappers/stat_analysis_wrapper.py | 413 ++++++++++++---------- 2 files changed, 232 
insertions(+), 190 deletions(-) diff --git a/metplus/util/time_util.py b/metplus/util/time_util.py index 4eb180f30e..a28fd63d58 100755 --- a/metplus/util/time_util.py +++ b/metplus/util/time_util.py @@ -89,13 +89,13 @@ def get_seconds_from_string(value, default_unit='S', valid_time=None): rd_obj = get_relativedelta(value, default_unit) return ti_get_seconds_from_relativedelta(rd_obj, valid_time) -def time_string_to_met_time(time_string, default_unit='S'): +def time_string_to_met_time(time_string, default_unit='S', force_hms=False): """!Convert time string (3H, 4M, 7, etc.) to format expected by the MET tools ([H]HH[MM[SS]])""" total_seconds = get_seconds_from_string(time_string, default_unit) - return seconds_to_met_time(total_seconds) + return seconds_to_met_time(total_seconds, force_hms=force_hms) -def seconds_to_met_time(total_seconds): +def seconds_to_met_time(total_seconds, force_hms=False): seconds_time_string = str(total_seconds % 60).zfill(2) minutes_time_string = str(total_seconds // 60 % 60).zfill(2) hour_time_string = str(total_seconds // 3600).zfill(2) @@ -103,7 +103,8 @@ def seconds_to_met_time(total_seconds): # if hour is 6 or more digits, we need to add minutes and seconds # also if minutes and/or seconds they are defined # add minutes if seconds are defined as well - if len(hour_time_string) > 5 or minutes_time_string != '00' or seconds_time_string != '00': + if (force_hms or len(hour_time_string) > 5 or + minutes_time_string != '00' or seconds_time_string != '00'): return hour_time_string + minutes_time_string + seconds_time_string else: return hour_time_string diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 5c54689c07..c4dce66411 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -17,12 +17,15 @@ import glob import datetime import itertools +from dateutil.relativedelta import relativedelta from ..util import getlist from ..util import 
met_util as util from ..util import do_string_sub, find_indices_in_config_section from ..util import parse_var_list, remove_quotes from ..util import get_start_and_end_times +from ..util import time_string_to_met_time, get_relativedelta +from ..util import ti_get_seconds_from_relativedelta from . import CommandBuilder class StatAnalysisWrapper(CommandBuilder): @@ -330,24 +333,34 @@ def _format_conf_list(self, conf_list): formatted_items = [] for item in items: - sub_items = [] - for sub_item in item.split(','): - # if list in format lists, zero pad value to be at least 2 - # digits, then add zeros to make 6 digits - if conf_list in self.FORMAT_LISTS: - sub_item = self._format_hms(sub_item) - sub_items.append(sub_item) - - # format list as string with quotes around each item - sub_item_str = '", "'.join(sub_items) - formatted_items.append(f'"{sub_item_str}"') + # do not format items in format list now + if conf_list not in self.FORMAT_LISTS: + sub_items = item.split(',') + sub_item_str = '", "'.join(sub_items) + formatted_items.append(f'"{sub_item_str}"') + else: + formatted_items.append(item) + + # formatted_items = [] + # for item in items: + # sub_items = [] + # for sub_item in item.split(','): + # # if list in format lists, zero pad value to be at least 2 + # # digits, then add zeros to make 6 digits + # if conf_list in self.FORMAT_LISTS: + # sub_item = self._format_hms(sub_item) + # sub_items.append(sub_item) + # + # # format list as string with quotes around each item + # sub_item_str = '", "'.join(sub_items) + # formatted_items.append(f'"{sub_item_str}"') return formatted_items - @staticmethod - def _format_hms(value): - padded_value = value.zfill(2) - return padded_value.ljust(len(padded_value) + 4, '0') + # @staticmethod + # def _format_hms(value): + # padded_value = value.zfill(2) + # return padded_value.ljust(len(padded_value) + 4, '0') @staticmethod def list_to_str(list_of_values, add_quotes=True): @@ -373,13 +386,39 @@ def list_to_str(list_of_values, 
add_quotes=True): return ', '.join(list_of_values) @staticmethod - def str_to_list(string_value, sort_list=False): - # remove double quotes and split by comma - str_list = string_value.replace('"', '').split(',') - str_list = [item.strip() for item in str_list] + def _format_time_list(string_value, get_met_format, sort_list=True): + out_list = [] + if not string_value: + return [] + for time_string in string_value.split(','): + time_string = time_string.strip() + if get_met_format: + value = time_string_to_met_time(time_string, default_unit='H', + force_hms=True) + out_list.append(value) + else: + delta_obj = get_relativedelta(time_string, default_unit='H') + out_list.append(delta_obj) + if sort_list: - str_list.sort() - return str_list + if get_met_format: + out_list.sort(key=int) + else: + out_list.sort(key=ti_get_seconds_from_relativedelta) + + return out_list + + @staticmethod + def _get_met_time_list(string_value, sort_list=True): + return StatAnalysisWrapper._format_time_list(string_value, + get_met_format=True, + sort_list=sort_list) + + @staticmethod + def _get_relativedelta_list(string_value, sort_list=True): + return StatAnalysisWrapper._format_time_list(string_value, + get_met_format=False, + sort_list=sort_list) def set_lists_loop_or_group(self, c_dict): """! 
Determine whether the lists from the METplus config file @@ -500,78 +539,96 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict['OBTYPE'].replace('"', '').replace(' ', '') ) elif 'HOUR' in list_name: - stringsub_dict[list_name.lower()] = ( - datetime.datetime.strptime(list_name_value, '%H%M%S') - ) - stringsub_dict[list_name.lower()+'_beg'] = stringsub_dict[ - list_name.lower() - ] - stringsub_dict[list_name.lower()+'_end'] = stringsub_dict[ - list_name.lower() - ] - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] + # TODO: should this only handle opposite of date_type? 
+ delta_list = self._get_relativedelta_list(list_name_value) + if not delta_list: + continue + if len(delta_list) == 1: + stringsub_dict[list_name.lower()] = delta_list[0] + else: + stringsub_dict[list_name.lower()] = ( + '_'.join(self._get_met_time_list(list_name_value)) + ) + # stringsub_dict[list_name.lower()] = ( + # datetime.datetime.strptime(list_name_value, '%H%M%S') + # ) + stringsub_dict[list_name.lower() + '_beg'] = delta_list[0] + stringsub_dict[list_name.lower() + '_end'] = delta_list[-1] + # stringsub_dict[list_name.lower()+'_beg'] = stringsub_dict[ + # list_name.lower() + # ] + # stringsub_dict[list_name.lower()+'_end'] = stringsub_dict[ + # list_name.lower() + # ] + + if 'FCST' in list_name: + check_list = config_dict[list_name.replace('FCST', + 'OBS')] + elif 'OBS' in list_name: + check_list = config_dict[list_name.replace('OBS', + 'FCST')] + # if opposite fcst is not set or the same, + # set init/valid hour beg/end to fcst, same for obs + if not check_list or config_dict[list_name] == check_list: + # list type e.g. fcst_valid_hour + list_type = list_name.lower() + # generic list e.g. 
valid_hour + generic_list = ( + list_type.replace('fcst_', '').replace('obs_', '') + ) + stringsub_dict[f'{generic_list}_beg'] = ( + stringsub_dict[f'{list_type}_beg'] + ) + stringsub_dict[f'{generic_list}_end'] = ( + stringsub_dict[f'{list_type}_end'] + ) + if (stringsub_dict[f'{generic_list}_beg'] == + stringsub_dict[f'{generic_list}_end']): + stringsub_dict[generic_list] = ( + stringsub_dict[f'{list_type}_end'] ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) - # if multiple leads are specified, do not format lead info - # this behavior is the same as if lead list is in group lists - elif 'LEAD' in list_name and len(list_name_value.split(',')) == 1: - lead_timedelta = datetime.timedelta( - hours=int(list_name_value[:-4]), - minutes=int(list_name_value[-4:-2]), - seconds=int(list_name_value[-2:]) - ) - stringsub_dict[list_name.lower()] = list_name_value - stringsub_dict[list_name.lower()+'_hour'] = ( - list_name_value[:-4] - ) - stringsub_dict[list_name.lower()+'_min'] = ( - list_name_value[-4:-2] - ) - stringsub_dict[list_name.lower()+'_sec'] = ( - list_name_value[-2:] - ) - stringsub_dict[list_name.lower()+'_totalsec'] = str(int( - lead_timedelta.total_seconds() - )) - list_type = list_name.replace('_LEAD', '').lower() - check_list1 = config_dict[list_name] + + elif 'LEAD' in list_name: + lead_list = self._get_met_time_list(list_name_value) + + # if multiple leads are specified, format lead info + # using met time notation separated by underscore + if len(lead_list) > 1: + stringsub_dict[list_name.lower()] = ( + '_'.join(lead_list) + ) + continue + + stringsub_dict[list_name.lower()] = lead_list[0] + + lead_rd = self._get_relativedelta_list(list_name_value)[0] + total_sec = ti_get_seconds_from_relativedelta(lead_rd) + stringsub_dict[list_name.lower()+'_totalsec'] = str(total_sec) + + stringsub_dict[f'{list_name.lower()}_hour'] = lead_list[0][:-4] + 
stringsub_dict[f'{list_name.lower()}_min'] = lead_list[0][-4:-2] + stringsub_dict[f'{list_name.lower()}_sec'] = lead_list[0][-2:] + # lead_timedelta = datetime.timedelta( + # hours=int(list_name_value[:-4]), + # minutes=int(list_name_value[-4:-2]), + # seconds=int(list_name_value[-2:]) + # ) + # #stringsub_dict[list_name.lower()] = list_name_value + # stringsub_dict[list_name.lower()+'_hour'] = ( + # list_name_value[:-4] + # ) + # stringsub_dict[list_name.lower()+'_min'] = ( + # list_name_value[-4:-2] + # ) + # stringsub_dict[list_name.lower()+'_sec'] = ( + # list_name_value[-2:] + # ) + if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', 'OBS')] + check_list = config_dict[list_name.replace('FCST', 'OBS')] elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): + check_list = config_dict[list_name.replace('OBS', 'FCST')] + if not check_list or config_dict[list_name] == check_list: stringsub_dict['lead'] = stringsub_dict[list_name.lower()] stringsub_dict['lead_hour'] = ( stringsub_dict[list_name.lower()+'_hour'] @@ -595,102 +652,83 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict[list_name].replace('"', '').replace(' ', '') .replace(',', '_').replace('*', 'ALL') ) - if 'HOUR' in list_name: + if 'LEAD' in list_name: + lead_list = self._get_met_time_list(config_dict[list_name]) + stringsub_dict[list_name.lower()] = '_'.join(lead_list) + + elif 'HOUR' in list_name: list_name_values_list = ( config_dict[list_name].replace('"', '').split(', ') ) stringsub_dict[list_name.lower()] = list_name_value - if list_name_values_list != ['']: - stringsub_dict[list_name.lower()+'_beg'] = ( - datetime.datetime.strptime(list_name_values_list[0], - '%H%M%S') - ) + if list_name_values_list == ['']: + stringsub_dict[list_name.lower()+'_beg'] = relativedelta() stringsub_dict[list_name.lower()+'_end'] = ( - 
datetime.datetime.strptime(list_name_values_list[-1], - '%H%M%S') + relativedelta(hours=+23, minutes=+59, seconds=+59) ) - if (stringsub_dict[list_name.lower()+'_beg'] - == stringsub_dict[list_name.lower()+'_end']): - stringsub_dict[list_name.lower()] = ( - stringsub_dict[list_name.lower()+'_end'] - ) - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) + # stringsub_dict[list_name.lower()+'_beg'] = ( + # datetime.datetime.strptime('000000', + # '%H%M%S') + # ) + # stringsub_dict[list_name.lower()+'_end'] = ( + # datetime.datetime.strptime('235959', + # '%H%M%S') + # ) else: - stringsub_dict[list_name.lower()+'_beg'] = ( - datetime.datetime.strptime('000000', - '%H%M%S') + # TODO: should this only handle opposite of date_type? 
+ delta_list = self._get_relativedelta_list(config_dict[list_name]) + if not delta_list: + continue + if len(delta_list) == 1: + stringsub_dict[list_name.lower()] = delta_list[0] + else: + stringsub_dict[list_name.lower()] = ( + '_'.join(self._get_met_time_list(config_dict[list_name])) + ) + + # set min and max values as beg and end + stringsub_dict[list_name.lower() + '_beg'] = delta_list[0] + stringsub_dict[list_name.lower() + '_end'] = delta_list[-1] + + # stringsub_dict[list_name.lower()+'_beg'] = ( + # datetime.datetime.strptime(list_name_values_list[0], + # '%H%M%S') + # ) + # stringsub_dict[list_name.lower()+'_end'] = ( + # datetime.datetime.strptime(list_name_values_list[-1], + # '%H%M%S') + # ) + # if (stringsub_dict[list_name.lower()+'_beg'] + # == stringsub_dict[list_name.lower()+'_end']): + # stringsub_dict[list_name.lower()] = ( + # stringsub_dict[list_name.lower()+'_end'] + # ) + + if 'FCST' in list_name: + check_list = config_dict[list_name.replace('FCST', + 'OBS')] + elif 'OBS' in list_name: + check_list = config_dict[list_name.replace('OBS', + 'FCST')] + if not check_list or config_dict[list_name] == check_list: + # list type e.g. fcst_valid_hour + list_type = list_name.lower() + # generic list e.g. 
valid_hour + generic_list = ( + list_type.replace('fcst_', '').replace('obs_', '') ) - stringsub_dict[list_name.lower()+'_end'] = ( - datetime.datetime.strptime('235959', - '%H%M%S') + stringsub_dict[f'{generic_list}_beg'] = ( + stringsub_dict[f'{list_type}_beg'] ) - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) + stringsub_dict[f'{generic_list}_end'] = ( + stringsub_dict[f'{list_type}_end'] + ) + if (stringsub_dict[f'{generic_list}_beg'] == + stringsub_dict[f'{generic_list}_end']): + stringsub_dict[generic_list] = ( + stringsub_dict[f'{list_type}_end'] + ) + else: stringsub_dict[list_name.lower()] = list_name_value @@ -714,12 +752,12 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): """ date_type = self.c_dict['DATE_TYPE'].lower() if fcst_hour_str: - fcst_hour_list = self.str_to_list(fcst_hour_str, sort_list=True) + fcst_hour_list = self._get_relativedelta_list(fcst_hour_str) else: fcst_hour_list = None if obs_hour_str: - obs_hour_list = self.str_to_list(obs_hour_str, sort_list=True) + 
obs_hour_list = self._get_relativedelta_list(obs_hour_str) else: obs_hour_list = None @@ -763,14 +801,10 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): if hour_list: sub_dict[f'{prefix}_beg'] = ( - datetime.datetime.strptime( - date_beg + hour_list[0], '%Y%m%d%H%M%S' - ) + datetime.datetime.strptime(date_beg, '%Y%m%d') + hour_list[0] ) sub_dict[f'{prefix}_end'] = ( - datetime.datetime.strptime( - date_end + hour_list[-1], '%Y%m%d%H%M%S' - ) + datetime.datetime.strptime(date_end, '%Y%m%d') + hour_list[-1] ) if sub_dict[f'{prefix}_beg'] == sub_dict[f'{prefix}_end']: sub_dict[prefix] = sub_dict[f'{prefix}_beg'] @@ -838,10 +872,11 @@ def get_output_filename(self, output_type, filename_template, if loop_list != 'MODEL_LIST': list_name = loop_list.replace('_LIST', '') if 'HOUR' in list_name: + value = self._get_met_time_list(config_dict[list_name])[0] filename_template = ( filename_template+'_' +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '')+'Z' + +value+'Z' ) else: filename_template = ( @@ -913,6 +948,12 @@ def format_valid_init(self, config_dict): date_end = self.c_dict['DATE_END'] date_type = self.c_dict['DATE_TYPE'] + for list_name in self.FORMAT_LISTS: + list_name = list_name.replace('_LIST', '') + values = self._get_met_time_list(config_dict.get(list_name, '')) + values = [f'"{item}"' for item in values] + config_dict[list_name] = ', '.join(values) + fcst_valid_hour_list = config_dict['FCST_VALID_HOUR'].split(', ') fcst_init_hour_list = config_dict['FCST_INIT_HOUR'].split(', ') obs_valid_hour_list = config_dict['OBS_VALID_HOUR'].split(', ') From f7a23fad56e1c77f4acefaeff5a8333c6f1e1e19 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 12:07:18 -0600 Subject: [PATCH 30/92] refactored unit tests to use parameterize and cleaned up code --- .../stat_analysis/test_stat_analysis.py | 422 +++++++----------- metplus/wrappers/stat_analysis_wrapper.py | 
156 +++---- 2 files changed, 214 insertions(+), 364 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 9e06d2a4b8..cb8e616a57 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -201,16 +201,9 @@ def test_format_thresh(metplus_config, expression, expected_result): assert st.format_thresh(expression) == expected_result - -@pytest.mark.wrapper_d -def test_build_stringsub_dict(metplus_config): - # Independently test the building of - # the dictionary used in the stringtemplate - # substitution and the values are being set - # as expected - st = stat_analysis_wrapper(metplus_config) +def _set_config_dict_values(): config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' + config_dict['FCST_VALID_HOUR'] = '0' config_dict['FCST_VAR'] = '' config_dict['FCST_LEVEL'] = '' config_dict['INTERP_MTHD'] = '' @@ -233,212 +226,167 @@ def test_build_stringsub_dict(metplus_config): config_dict['OBS_VALID_HOUR'] = '' config_dict['ALPHA'] = '' config_dict['OBS_LEVEL'] = '' + return config_dict + +@pytest.mark.parametrize( + 'lists_to_loop,c_dict_overrides,config_dict_overrides,expected_values', [ + # Test 0 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {}, + {'valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_hour': relativedelta(), + 'valid_hour': relativedelta(), + 'fcst_valid_hour_beg': relativedelta(), + 'fcst_valid_hour_end': relativedelta(), + 'valid_hour_beg': relativedelta(), + 'valid_hour_end': relativedelta(), + 'model': 'MODEL_TEST', + 'obtype': 'MODEL_TEST_ANL', + 'fcst_init_hour': 
'000000_060000_120000_180000', + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=18), + 'init_hour_beg': relativedelta(), + 'init_hour_end': relativedelta(hours=18), + 'fcst_var': '', + 'fcst_level': '', + 'fcst_units': '', + 'fcst_thresh': '', + 'desc': '', + }, + ), + # Test 1 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_LEAD': '24'}, + {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_lead_totalsec': '86400', + 'fcst_lead_hour': '24', + 'fcst_lead_min': '00', + 'fcst_lead_sec': '00', + 'fcst_lead': '240000', + 'lead_totalsec': '86400', + 'lead_hour': '24', + 'lead_min': '00', + 'lead_sec': '00', + 'lead': '240000', + }, + ), + # Test 2 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190101', + 'DATE_TYPE': 'VALID'}, + {'FCST_LEAD': '120'}, + {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_lead_totalsec': '432000', + 'fcst_lead_hour': '120', + 'fcst_lead_min': '00', + 'fcst_lead_sec': '00', + 'fcst_lead': '1200000', + 'lead_totalsec': '432000', + 'lead_hour': '120', + 'lead_min': '00', + 'lead_sec': '00', + 'lead': '1200000', + }, + ), + # Test 3 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', + 'DATE_TYPE': 'INIT'}, + {}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 5, 18, 0, 0), + 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_init_end': datetime.datetime(2019, 1, 5, 18, 0, 0), + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=18), + 'init_hour_beg': relativedelta(), + 'init_hour_end': relativedelta(hours=18), + }, + ), + # Test 4 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 
'DATE_END': '20190101', + 'DATE_TYPE': 'INIT'}, + {'FCST_INIT_HOUR': '', 'FCST_LEAD': ''}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'obs_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'obs_init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=23, minutes=59, seconds=59), + 'obs_init_hour_beg': relativedelta(), + 'obs_init_hour_end': relativedelta(hours=23, minutes=59, seconds=59), + }, + ), + ] +) +@pytest.mark.wrapper_d +def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, + config_dict_overrides, expected_values): + # Independently test the building of + # the dictionary used in the stringtemplate + # substitution and the values are being set + # as expected + st = stat_analysis_wrapper(metplus_config) + config_dict = _set_config_dict_values() + # Test 1 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_end'] == - datetime.datetime(2019, 1, 5, 0, 0, 0)) - assert test_stringsub_dict['fcst_valid_hour'] == relativedelta() - 
#datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['fcst_valid_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['fcst_valid_hour_end'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_end'] == - datetime.datetime(2019, 1, 5, 0, 0, 0)) - assert test_stringsub_dict['valid_hour'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['valid_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['valid_hour_end'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['model'] == 'MODEL_TEST') - assert(test_stringsub_dict['obtype'] == 'MODEL_TEST_ANL') - assert(test_stringsub_dict['fcst_init_hour'] == - '000000_060000_120000_180000') - assert test_stringsub_dict['fcst_init_hour_beg'] == relativedelta(hours=0) - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['fcst_init_hour_end'] == relativedelta(hours=18) - #datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert test_stringsub_dict['init_hour_beg'] == relativedelta(hours=0) - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['init_hour_end'] == relativedelta(hours=18) - #datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['fcst_var'] == '') - assert(test_stringsub_dict['fcst_level'] == '') - assert(test_stringsub_dict['fcst_units'] == '') - assert(test_stringsub_dict['fcst_thresh'] == '') - assert(test_stringsub_dict['desc'] == '') - # Test 2 - config_dict['FCST_LEAD'] = '24' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', 
- 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_lead_totalsec'] == '86400') - assert(test_stringsub_dict['fcst_lead_hour'] == '24') - assert(test_stringsub_dict['fcst_lead_min'] == '00') - assert(test_stringsub_dict['fcst_lead_sec'] == '00') - assert(test_stringsub_dict['fcst_lead'] == '240000') - assert(test_stringsub_dict['lead_totalsec'] == '86400') - assert(test_stringsub_dict['lead_hour'] == '24') - assert(test_stringsub_dict['lead_min'] == '00') - assert(test_stringsub_dict['lead_sec'] == '00') - assert(test_stringsub_dict['lead'] == '240000') - # Test 3 - config_dict['FCST_LEAD'] = '120' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - 
assert(test_stringsub_dict['fcst_lead_totalsec'] == '432000') - assert(test_stringsub_dict['fcst_lead_hour'] == '120') - assert(test_stringsub_dict['fcst_lead_min'] == '00') - assert(test_stringsub_dict['fcst_lead_sec'] == '00') - assert(test_stringsub_dict['fcst_lead'] == '1200000') - assert(test_stringsub_dict['lead_totalsec'] == '432000') - assert(test_stringsub_dict['lead_hour'] == '120') - assert(test_stringsub_dict['lead_min'] == '00') - assert(test_stringsub_dict['lead_sec'] == '00') - assert(test_stringsub_dict['lead'] == '1200000') - # Test 4 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'INIT' - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert test_stringsub_dict['fcst_init_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['fcst_init_hour_end'] == relativedelta(hours=18) - #datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert (test_stringsub_dict['fcst_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert (test_stringsub_dict['fcst_init_end'] == - datetime.datetime(2019, 1, 5, 18, 0, 0)) - assert test_stringsub_dict['init_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert test_stringsub_dict['init_hour_end'] == relativedelta(hours=18) - #datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_end'] == - datetime.datetime(2019, 1, 5, 18, 0, 0)) - # Test 5 - config_dict['FCST_INIT_HOUR'] = '' - config_dict['FCST_LEAD'] = '' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'INIT' - lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 
'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] + for key, value in c_dict_overrides.items(): + st.c_dict[key] = value + + for key, value in config_dict_overrides.items(): + config_dict[key] = value + + lists_to_group = [item for item in st.EXPECTED_CONFIG_LISTS + if item not in lists_to_loop] test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) - assert(test_stringsub_dict['init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['fcst_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['fcst_init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['obs_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['obs_init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert test_stringsub_dict['fcst_init_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert (test_stringsub_dict['fcst_init_hour_end'] == - relativedelta(hours=23, minutes=59, seconds=59)) - #datetime.datetime(1900, 1, 1, 23, 59 ,59)) - assert test_stringsub_dict['obs_init_hour_beg'] == relativedelta() - #datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert (test_stringsub_dict['obs_init_hour_end'] == - relativedelta(hours=23, minutes=59, seconds=59)) + for key, value in expected_values.items(): + print(f'key: {key}') + assert test_stringsub_dict[key] == value +@pytest.mark.parametrize( + 'filename_template, output_type, filename_type,expected_output', [ + (('{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/' + '{model?fmt=%s}_{valid?fmt=%Y%m%d}.stat'), + 'dump_row', 'user', '00Z/MODEL_TEST/MODEL_TEST_20190101.stat'), + 
(('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}_' + 'fcstvalidhour000000Z_dump_row.stat'), + 'dump_row', 'user', ('MODEL_TEST_MODEL_TEST_ANL_valid20190101_' + 'fcstvalidhour000000Z_dump_row.stat') + ), + (('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}' + '{valid_hour?fmt=%H}_init{fcst_init_hour?fmt=%s}.stat'), + 'out_stat', 'user', ('MODEL_TEST_MODEL_TEST_ANL_valid2019010100' + '_init000000_060000_120000_180000.stat') + ), + ('{model?fmt=%s}_{obtype?fmt=%s}', + 'out_stat', 'default', ('MODEL_TEST_MODEL_TEST_ANLvalid20190101' + '_fcstvalidhour000000Z_out_stat.stat') + ), + ] +) @pytest.mark.wrapper_d -def test_get_output_filename(metplus_config): +def test_get_output_filename(metplus_config, filename_template, output_type, + filename_type, expected_output): # Independently test the building of # the output file name # using string template substitution # and test the values is # as expected st = stat_analysis_wrapper(metplus_config) - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '0' - config_dict['FCST_VAR'] = '' - config_dict['FCST_LEVEL'] = '' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '' + config_dict = _set_config_dict_values() + st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' @@ -451,78 +399,14 @@ def test_get_output_filename(metplus_config): 
'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST'] lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] - # Test 1 - expected_output_filename = '00Z/MODEL_TEST/MODEL_TEST_20190101.stat' - output_type = 'dump_row' - filename_template = ( - '{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/{model?fmt=%s}_{valid?fmt=%Y%m%d}.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 2 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL_' - +'valid20190101_fcstvalidhour000000Z' - +'_dump_row.stat' - ) - output_type = 'dump_row' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}' - +'_valid{valid?fmt=%Y%m%d}_' - +'fcstvalidhour000000Z_dump_row.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 3 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL' - +'_valid2019010100' - +'_init000000_060000_120000_180000.stat' - ) - output_type = 'out_stat' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}' - +'_valid{valid?fmt=%Y%m%d}{valid_hour?fmt=%H}' - +'_init{fcst_init_hour?fmt=%s}.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 4 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL' - +'valid20190101_fcstvalidhour000000Z' - +'_out_stat.stat' - ) - output_type = 'out_stat' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}' - ) - filename_type = 'default' + test_output_filename = st.get_output_filename(output_type, filename_template, 
filename_type, lists_to_loop, lists_to_group, config_dict) - assert expected_output_filename == test_output_filename + assert expected_output == test_output_filename @pytest.mark.wrapper_d diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index c4dce66411..be1905d180 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -341,27 +341,8 @@ def _format_conf_list(self, conf_list): else: formatted_items.append(item) - # formatted_items = [] - # for item in items: - # sub_items = [] - # for sub_item in item.split(','): - # # if list in format lists, zero pad value to be at least 2 - # # digits, then add zeros to make 6 digits - # if conf_list in self.FORMAT_LISTS: - # sub_item = self._format_hms(sub_item) - # sub_items.append(sub_item) - # - # # format list as string with quotes around each item - # sub_item_str = '", "'.join(sub_items) - # formatted_items.append(f'"{sub_item_str}"') - return formatted_items - # @staticmethod - # def _format_hms(value): - # padded_value = value.zfill(2) - # return padded_value.ljust(len(padded_value) + 4, '0') - @staticmethod def list_to_str(list_of_values, add_quotes=True): """! Turn a list of values into a single string so it can be @@ -456,7 +437,8 @@ def set_lists_loop_or_group(self, c_dict): # loop through lists found in either loop or group lists originally for found_config in found_config_list: # if list is empty and in loop list, warn and move to group list - if not c_dict[found_config] and found_config in c_dict['LOOP_LIST_ITEMS']: + if (not c_dict[found_config] and + found_config in c_dict['LOOP_LIST_ITEMS']): self.logger.warning(found_config + " is empty, " + "will be treated as group.") c_dict['GROUP_LIST_ITEMS'].append(found_config) @@ -473,10 +455,11 @@ def set_lists_loop_or_group(self, c_dict): def format_thresh(thresh_str): """! Format thresholds for file naming - Args: - @param thresh_str string of the thresholds. 
Can be a comma-separated list, i.e. gt3,<=5.5, ==7 + @param thresh_str string of the thresholds. + Can be a comma-separated list, i.e. gt3,<=5.5, ==7 - @returns string of comma-separated list of the threshold(s) with letter format, i.e. gt3, le5.5, eq7 + @returns string of comma-separated list of the threshold(s) with + letter format, i.e. gt3, le5.5, eq7 """ formatted_thresh_list = [] # separate thresholds by comma and strip off whitespace around values @@ -528,38 +511,36 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): # Set loop information for loop_list in lists_to_loop: list_name = loop_list.replace('_LIST', '') - # TODO: change commas to underscores if loop item contains a list + sub_name = list_name.lower() list_name_value = ( config_dict[list_name].replace('"', '').replace(' ', '') + .replace(',', '_').replace('*', 'ALL') ) if list_name == 'MODEL': - stringsub_dict[list_name.lower()] = list_name_value + stringsub_dict[sub_name] = list_name_value stringsub_dict['obtype'] = ( config_dict['OBTYPE'].replace('"', '').replace(' ', '') ) elif 'HOUR' in list_name: # TODO: should this only handle opposite of date_type? - delta_list = self._get_relativedelta_list(list_name_value) + delta_list = self._get_relativedelta_list(config_dict[list_name]) if not delta_list: + # TODO: should this be set to 0:0:0 to 23:59:59? 
+ stringsub_dict[sub_name + '_beg'] = relativedelta() + stringsub_dict[sub_name + '_end'] = ( + relativedelta(hours=+23, minutes=+59, seconds=+59) + ) continue if len(delta_list) == 1: - stringsub_dict[list_name.lower()] = delta_list[0] + stringsub_dict[sub_name] = delta_list[0] else: - stringsub_dict[list_name.lower()] = ( - '_'.join(self._get_met_time_list(list_name_value)) + stringsub_dict[sub_name] = ( + '_'.join(self._get_met_time_list(config_dict[list_name])) ) - # stringsub_dict[list_name.lower()] = ( - # datetime.datetime.strptime(list_name_value, '%H%M%S') - # ) - stringsub_dict[list_name.lower() + '_beg'] = delta_list[0] - stringsub_dict[list_name.lower() + '_end'] = delta_list[-1] - # stringsub_dict[list_name.lower()+'_beg'] = stringsub_dict[ - # list_name.lower() - # ] - # stringsub_dict[list_name.lower()+'_end'] = stringsub_dict[ - # list_name.lower() - # ] + + stringsub_dict[sub_name + '_beg'] = delta_list[0] + stringsub_dict[sub_name + '_end'] = delta_list[-1] if 'FCST' in list_name: check_list = config_dict[list_name.replace('FCST', @@ -570,107 +551,92 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): # if opposite fcst is not set or the same, # set init/valid hour beg/end to fcst, same for obs if not check_list or config_dict[list_name] == check_list: - # list type e.g. fcst_valid_hour - list_type = list_name.lower() + # sub name e.g. fcst_valid_hour # generic list e.g. 
valid_hour generic_list = ( - list_type.replace('fcst_', '').replace('obs_', '') + sub_name.replace('fcst_', '').replace('obs_', '') ) stringsub_dict[f'{generic_list}_beg'] = ( - stringsub_dict[f'{list_type}_beg'] + stringsub_dict[f'{sub_name}_beg'] ) stringsub_dict[f'{generic_list}_end'] = ( - stringsub_dict[f'{list_type}_end'] + stringsub_dict[f'{sub_name}_end'] ) if (stringsub_dict[f'{generic_list}_beg'] == stringsub_dict[f'{generic_list}_end']): stringsub_dict[generic_list] = ( - stringsub_dict[f'{list_type}_end'] + stringsub_dict[f'{sub_name}_end'] ) - + # TODO: LEAD can be used for both loop and group elif 'LEAD' in list_name: - lead_list = self._get_met_time_list(list_name_value) + lead_list = self._get_met_time_list(config_dict[list_name]) # if multiple leads are specified, format lead info # using met time notation separated by underscore if len(lead_list) > 1: - stringsub_dict[list_name.lower()] = ( + stringsub_dict[sub_name] = ( '_'.join(lead_list) ) continue - stringsub_dict[list_name.lower()] = lead_list[0] + stringsub_dict[sub_name] = lead_list[0] - lead_rd = self._get_relativedelta_list(list_name_value)[0] + lead_rd = self._get_relativedelta_list(config_dict[list_name])[0] total_sec = ti_get_seconds_from_relativedelta(lead_rd) - stringsub_dict[list_name.lower()+'_totalsec'] = str(total_sec) - - stringsub_dict[f'{list_name.lower()}_hour'] = lead_list[0][:-4] - stringsub_dict[f'{list_name.lower()}_min'] = lead_list[0][-4:-2] - stringsub_dict[f'{list_name.lower()}_sec'] = lead_list[0][-2:] - # lead_timedelta = datetime.timedelta( - # hours=int(list_name_value[:-4]), - # minutes=int(list_name_value[-4:-2]), - # seconds=int(list_name_value[-2:]) - # ) - # #stringsub_dict[list_name.lower()] = list_name_value - # stringsub_dict[list_name.lower()+'_hour'] = ( - # list_name_value[:-4] - # ) - # stringsub_dict[list_name.lower()+'_min'] = ( - # list_name_value[-4:-2] - # ) - # stringsub_dict[list_name.lower()+'_sec'] = ( - # list_name_value[-2:] - # ) + 
stringsub_dict[sub_name+'_totalsec'] = str(total_sec) + + stringsub_dict[f'{sub_name}_hour'] = lead_list[0][:-4] + stringsub_dict[f'{sub_name}_min'] = lead_list[0][-4:-2] + stringsub_dict[f'{sub_name}_sec'] = lead_list[0][-2:] if 'FCST' in list_name: check_list = config_dict[list_name.replace('FCST', 'OBS')] elif 'OBS' in list_name: check_list = config_dict[list_name.replace('OBS', 'FCST')] if not check_list or config_dict[list_name] == check_list: - stringsub_dict['lead'] = stringsub_dict[list_name.lower()] + stringsub_dict['lead'] = stringsub_dict[sub_name] stringsub_dict['lead_hour'] = ( - stringsub_dict[list_name.lower()+'_hour'] + stringsub_dict[sub_name+'_hour'] ) stringsub_dict['lead_min'] = ( - stringsub_dict[list_name.lower()+'_min'] + stringsub_dict[sub_name+'_min'] ) stringsub_dict['lead_sec'] = ( - stringsub_dict[list_name.lower()+'_sec'] + stringsub_dict[sub_name+'_sec'] ) stringsub_dict['lead_totalsec'] = ( - stringsub_dict[list_name.lower()+'_totalsec'] + stringsub_dict[sub_name+'_totalsec'] ) else: - stringsub_dict[list_name.lower()] = list_name_value + stringsub_dict[sub_name] = list_name_value # Set group information for group_list in lists_to_group: list_name = group_list.replace('_LIST', '') + sub_name = list_name.lower() list_name_value = ( config_dict[list_name].replace('"', '').replace(' ', '') .replace(',', '_').replace('*', 'ALL') ) if 'LEAD' in list_name: lead_list = self._get_met_time_list(config_dict[list_name]) - stringsub_dict[list_name.lower()] = '_'.join(lead_list) + stringsub_dict[sub_name] = '_'.join(lead_list) elif 'HOUR' in list_name: list_name_values_list = ( config_dict[list_name].replace('"', '').split(', ') ) - stringsub_dict[list_name.lower()] = list_name_value + stringsub_dict[sub_name] = list_name_value if list_name_values_list == ['']: - stringsub_dict[list_name.lower()+'_beg'] = relativedelta() - stringsub_dict[list_name.lower()+'_end'] = ( + stringsub_dict[sub_name+'_beg'] = relativedelta() + 
stringsub_dict[sub_name+'_end'] = ( relativedelta(hours=+23, minutes=+59, seconds=+59) ) - # stringsub_dict[list_name.lower()+'_beg'] = ( + # stringsub_dict[sub_name+'_beg'] = ( # datetime.datetime.strptime('000000', # '%H%M%S') # ) - # stringsub_dict[list_name.lower()+'_end'] = ( + # stringsub_dict[sub_name+'_end'] = ( # datetime.datetime.strptime('235959', # '%H%M%S') # ) @@ -680,28 +646,28 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): if not delta_list: continue if len(delta_list) == 1: - stringsub_dict[list_name.lower()] = delta_list[0] + stringsub_dict[sub_name] = delta_list[0] else: - stringsub_dict[list_name.lower()] = ( + stringsub_dict[sub_name] = ( '_'.join(self._get_met_time_list(config_dict[list_name])) ) # set min and max values as beg and end - stringsub_dict[list_name.lower() + '_beg'] = delta_list[0] - stringsub_dict[list_name.lower() + '_end'] = delta_list[-1] + stringsub_dict[sub_name + '_beg'] = delta_list[0] + stringsub_dict[sub_name + '_end'] = delta_list[-1] - # stringsub_dict[list_name.lower()+'_beg'] = ( + # stringsub_dict[sub_name+'_beg'] = ( # datetime.datetime.strptime(list_name_values_list[0], # '%H%M%S') # ) - # stringsub_dict[list_name.lower()+'_end'] = ( + # stringsub_dict[sub_name+'_end'] = ( # datetime.datetime.strptime(list_name_values_list[-1], # '%H%M%S') # ) - # if (stringsub_dict[list_name.lower()+'_beg'] - # == stringsub_dict[list_name.lower()+'_end']): - # stringsub_dict[list_name.lower()] = ( - # stringsub_dict[list_name.lower()+'_end'] + # if (stringsub_dict[sub_name+'_beg'] + # == stringsub_dict[sub_name+'_end']): + # stringsub_dict[sub_name] = ( + # stringsub_dict[sub_name+'_end'] # ) if 'FCST' in list_name: @@ -712,7 +678,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): 'FCST')] if not check_list or config_dict[list_name] == check_list: # list type e.g. fcst_valid_hour - list_type = list_name.lower() + list_type = sub_name # generic list e.g. 
valid_hour generic_list = ( list_type.replace('fcst_', '').replace('obs_', '') @@ -730,7 +696,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): ) else: - stringsub_dict[list_name.lower()] = list_name_value + stringsub_dict[sub_name] = list_name_value #nkeys_end = len(stringsub_dict_keys) # Some lines for debugging if needed in future From dc0e239a8e70725cbe368b28990a2f7d0fffc2a7 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 13:20:58 -0600 Subject: [PATCH 31/92] combine logic to process loop lists and group lists to consolidate logic and to be able to handle a loop list that contains a group of items, e.g. FCST_LEAD_LIST1, FCST_LEAD_LIST2, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 102 ++-------------------- 1 file changed, 8 insertions(+), 94 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index be1905d180..0c79a13135 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -509,8 +509,8 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict[f'OBS_{date_type}_HOUR']) # Set loop information - for loop_list in lists_to_loop: - list_name = loop_list.replace('_LIST', '') + for loop_or_group_list in lists_to_loop + lists_to_group: + list_name = loop_or_group_list.replace('_LIST', '') sub_name = list_name.lower() list_name_value = ( config_dict[list_name].replace('"', '').replace(' ', '') @@ -522,10 +522,12 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict['obtype'] = ( config_dict['OBTYPE'].replace('"', '').replace(' ', '') ) + # TODO: HOUR can be used for both loop and group elif 'HOUR' in list_name: # TODO: should this only handle opposite of date_type? 
delta_list = self._get_relativedelta_list(config_dict[list_name]) if not delta_list: + stringsub_dict[sub_name] = list_name_value # TODO: should this be set to 0:0:0 to 23:59:59? stringsub_dict[sub_name + '_beg'] = relativedelta() stringsub_dict[sub_name + '_end'] = ( @@ -571,6 +573,9 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): elif 'LEAD' in list_name: lead_list = self._get_met_time_list(config_dict[list_name]) + if not lead_list: + continue + # if multiple leads are specified, format lead info # using met time notation separated by underscore if len(lead_list) > 1: @@ -610,100 +615,9 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): else: stringsub_dict[sub_name] = list_name_value - # Set group information - for group_list in lists_to_group: - list_name = group_list.replace('_LIST', '') - sub_name = list_name.lower() - list_name_value = ( - config_dict[list_name].replace('"', '').replace(' ', '') - .replace(',', '_').replace('*', 'ALL') - ) - if 'LEAD' in list_name: - lead_list = self._get_met_time_list(config_dict[list_name]) - stringsub_dict[sub_name] = '_'.join(lead_list) - - elif 'HOUR' in list_name: - list_name_values_list = ( - config_dict[list_name].replace('"', '').split(', ') - ) - stringsub_dict[sub_name] = list_name_value - if list_name_values_list == ['']: - stringsub_dict[sub_name+'_beg'] = relativedelta() - stringsub_dict[sub_name+'_end'] = ( - relativedelta(hours=+23, minutes=+59, seconds=+59) - ) - # stringsub_dict[sub_name+'_beg'] = ( - # datetime.datetime.strptime('000000', - # '%H%M%S') - # ) - # stringsub_dict[sub_name+'_end'] = ( - # datetime.datetime.strptime('235959', - # '%H%M%S') - # ) - else: - # TODO: should this only handle opposite of date_type? 
- delta_list = self._get_relativedelta_list(config_dict[list_name]) - if not delta_list: - continue - if len(delta_list) == 1: - stringsub_dict[sub_name] = delta_list[0] - else: - stringsub_dict[sub_name] = ( - '_'.join(self._get_met_time_list(config_dict[list_name])) - ) - - # set min and max values as beg and end - stringsub_dict[sub_name + '_beg'] = delta_list[0] - stringsub_dict[sub_name + '_end'] = delta_list[-1] - - # stringsub_dict[sub_name+'_beg'] = ( - # datetime.datetime.strptime(list_name_values_list[0], - # '%H%M%S') - # ) - # stringsub_dict[sub_name+'_end'] = ( - # datetime.datetime.strptime(list_name_values_list[-1], - # '%H%M%S') - # ) - # if (stringsub_dict[sub_name+'_beg'] - # == stringsub_dict[sub_name+'_end']): - # stringsub_dict[sub_name] = ( - # stringsub_dict[sub_name+'_end'] - # ) - - if 'FCST' in list_name: - check_list = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list = config_dict[list_name.replace('OBS', - 'FCST')] - if not check_list or config_dict[list_name] == check_list: - # list type e.g. fcst_valid_hour - list_type = sub_name - # generic list e.g. 
valid_hour - generic_list = ( - list_type.replace('fcst_', '').replace('obs_', '') - ) - stringsub_dict[f'{generic_list}_beg'] = ( - stringsub_dict[f'{list_type}_beg'] - ) - stringsub_dict[f'{generic_list}_end'] = ( - stringsub_dict[f'{list_type}_end'] - ) - if (stringsub_dict[f'{generic_list}_beg'] == - stringsub_dict[f'{generic_list}_end']): - stringsub_dict[generic_list] = ( - stringsub_dict[f'{list_type}_end'] - ) - - else: - stringsub_dict[sub_name] = list_name_value - - #nkeys_end = len(stringsub_dict_keys) # Some lines for debugging if needed in future - #self.logger.info(nkeys_start) - #self.logger.info(nkeys_end) #for key, value in stringsub_dict.items(): - # self.logger.info("{} ({})".format(key, value)) + # self.logger.debug("{} ({})".format(key, value)) return stringsub_dict def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): From c7496608a7307bc724d2f424cdbaa3a8c5d29137 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 13:29:44 -0600 Subject: [PATCH 32/92] formatting --- .../stat_analysis/test_stat_analysis.py | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 641364ba81..c1d1a864d1 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -202,6 +202,7 @@ def test_format_thresh(metplus_config, expression, expected_result): assert st.format_thresh(expression) == expected_result + def _set_config_dict_values(): config_dict = {} config_dict['FCST_VALID_HOUR'] = '0' @@ -229,6 +230,7 @@ def _set_config_dict_values(): config_dict['OBS_LEVEL'] = '' return config_dict + @pytest.mark.parametrize( 'lists_to_loop,c_dict_overrides,config_dict_overrides,expected_values', [ # Test 0 @@ -236,29 +238,29 @@ 
def _set_config_dict_values(): {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, {}, {'valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), - 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), - 'fcst_valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), - 'fcst_valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), - 'fcst_valid_hour': relativedelta(), - 'valid_hour': relativedelta(), - 'fcst_valid_hour_beg': relativedelta(), - 'fcst_valid_hour_end': relativedelta(), - 'valid_hour_beg': relativedelta(), - 'valid_hour_end': relativedelta(), - 'model': 'MODEL_TEST', - 'obtype': 'MODEL_TEST_ANL', - 'fcst_init_hour': '000000_060000_120000_180000', - 'fcst_init_hour_beg': relativedelta(), - 'fcst_init_hour_end': relativedelta(hours=18), - 'init_hour_beg': relativedelta(), - 'init_hour_end': relativedelta(hours=18), - 'fcst_var': '', - 'fcst_level': '', - 'fcst_units': '', - 'fcst_thresh': '', - 'desc': '', - }, - ), + 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_hour': relativedelta(), + 'valid_hour': relativedelta(), + 'fcst_valid_hour_beg': relativedelta(), + 'fcst_valid_hour_end': relativedelta(), + 'valid_hour_beg': relativedelta(), + 'valid_hour_end': relativedelta(), + 'model': 'MODEL_TEST', + 'obtype': 'MODEL_TEST_ANL', + 'fcst_init_hour': '000000_060000_120000_180000', + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=18), + 'init_hour_beg': relativedelta(), + 'init_hour_end': relativedelta(hours=18), + 'fcst_var': '', + 'fcst_level': '', + 'fcst_units': '', + 'fcst_thresh': '', + 'desc': '', + }, + ), # Test 1 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, @@ -439,7 +441,7 @@ def test_get_lookin_dir(metplus_config): config_dict['DESC'] = '' config_dict['OBS_LEAD'] = '' 
config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' + config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' config_dict['OBS_VALID_HOUR'] = '' config_dict['ALPHA'] = '' config_dict['OBS_LEVEL'] = '' From 83405d429891fdc8607738d680d18bfc80e8b24c Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 15:00:33 -0600 Subject: [PATCH 33/92] Added test to ensure that reading FCST_LEAD_LIST1, FCST_LEAD_LIST2, etc. works as expected. Moved logic to set config_dict to function, changed default values to unset hour lists, set hour lists as needed in tests. --- .../stat_analysis/test_stat_analysis.py | 107 +++++++++++------- 1 file changed, 68 insertions(+), 39 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index c1d1a864d1..b9926b1844 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -27,6 +27,60 @@ def stat_analysis_wrapper(metplus_config): handle_tmp_dir(config) return StatAnalysisWrapper(config) + +def _set_config_dict_values(): + config_dict = {} + config_dict['FCST_VALID_HOUR'] = '' + config_dict['FCST_VAR'] = '' + config_dict['FCST_LEVEL'] = '' + config_dict['INTERP_MTHD'] = '' + config_dict['MODEL'] = '"MODEL_TEST"' + config_dict['VX_MASK'] = '' + config_dict['OBS_INIT_HOUR'] = '' + config_dict['COV_THRESH'] = '' + config_dict['OBS_UNITS'] = '' + config_dict['FCST_THRESH'] = '' + config_dict['OBS_VAR'] = '' + config_dict['FCST_INIT_HOUR'] = '' + config_dict['INTERP_PNTS'] = '' + config_dict['FCST_LEAD'] = '' + config_dict['LINE_TYPE'] = '' + config_dict['FCST_UNITS'] = '' + config_dict['DESC'] = '' + config_dict['OBS_LEAD'] = '' + config_dict['OBS_THRESH'] = '' + config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' + config_dict['OBS_VALID_HOUR'] = '' + config_dict['ALPHA'] = '' + 
config_dict['OBS_LEVEL'] = '' + return config_dict + + +@pytest.mark.parametrize( + 'list_name, config_overrides, expected_value', [ + ('FCST_LEAD_LIST', {'FCST_LEAD_LIST': '12'}, ['12']), + ('FCST_LEAD_LIST', {'FCST_LEAD_LIST': '12,24'}, ['12', '24']), + ('FCST_LEAD_LIST', + {'FCST_LEAD_LIST1': '12,24', 'FCST_LEAD_LIST2': '48,96'}, + ['12,24', '48,96']), + ('FCST_LEAD_LIST', + {'FCST_LEAD_LIST1': 'begin_end_incr(12,24,12)', + 'FCST_LEAD_LIST2': 'begin_end_incr(48,96,48)'}, + ['12,24', '48,96']), + ] +) +@pytest.mark.wrapper_d +def test_format_conf_list(metplus_config, list_name, config_overrides, + expected_value): + config = metplus_config() + for key, value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + + assert wrapper._format_conf_list(list_name) == expected_value + + @pytest.mark.parametrize( 'input_str, expected_output', [ ('', []), @@ -203,40 +257,12 @@ def test_format_thresh(metplus_config, expression, expected_result): assert st.format_thresh(expression) == expected_result -def _set_config_dict_values(): - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '0' - config_dict['FCST_VAR'] = '' - config_dict['FCST_LEVEL'] = '' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '' - return config_dict - - @pytest.mark.parametrize( 
'lists_to_loop,c_dict_overrides,config_dict_overrides,expected_values', [ # Test 0 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, - {}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18'}, {'valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), 'fcst_valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), @@ -264,7 +290,8 @@ def _set_config_dict_values(): # Test 1 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, - {'FCST_LEAD': '24'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18', + 'FCST_LEAD': '24'}, {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), 'fcst_lead_totalsec': '86400', @@ -281,9 +308,9 @@ def _set_config_dict_values(): ), # Test 2 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], - {'DATE_BEG': '20190101', 'DATE_END': '20190101', - 'DATE_TYPE': 'VALID'}, - {'FCST_LEAD': '120'}, + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18', + 'FCST_LEAD': '120'}, {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), 'fcst_lead_totalsec': '432000', @@ -300,9 +327,8 @@ def _set_config_dict_values(): ), # Test 3 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], - {'DATE_BEG': '20190101', 'DATE_END': '20190105', - 'DATE_TYPE': 'INIT'}, - {}, + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18'}, {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), 'init_end': datetime.datetime(2019, 1, 5, 18, 0, 0), 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), @@ -315,9 +341,8 @@ def _set_config_dict_values(): ), # Test 4 (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], - {'DATE_BEG': '20190101', 
'DATE_END': '20190101', - 'DATE_TYPE': 'INIT'}, - {'FCST_INIT_HOUR': '', 'FCST_LEAD': ''}, + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '', 'FCST_LEAD': ''}, {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), 'init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), @@ -354,10 +379,12 @@ def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, lists_to_group, config_dict) + print(test_stringsub_dict) for key, value in expected_values.items(): print(f'key: {key}') assert test_stringsub_dict[key] == value + @pytest.mark.parametrize( 'filename_template, output_type, filename_type,expected_output', [ (('{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/' @@ -389,6 +416,8 @@ def test_get_output_filename(metplus_config, filename_template, output_type, # as expected st = stat_analysis_wrapper(metplus_config) config_dict = _set_config_dict_values() + config_dict['FCST_VALID_HOUR'] = '0' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' From 37b42c5bf2a78bceb272c0a3552145b17b01ba79 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 15:01:24 -0600 Subject: [PATCH 34/92] added logic to compute init_beg/end from valid_beg/end and forecast leads, and vice versa. 
ci-run-diff --- .../stat_analysis/test_stat_analysis.py | 22 ++++ metplus/wrappers/stat_analysis_wrapper.py | 104 ++++++++++++++---- 2 files changed, 104 insertions(+), 22 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index b9926b1844..a834b4af87 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -355,6 +355,28 @@ def test_format_thresh(metplus_config, expression, expected_result): 'obs_init_hour_end': relativedelta(hours=23, minutes=59, seconds=59), }, ), + # Test 5 - check computed init_beg/end + (['FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', + 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_LEAD': '12,24'}, + {'valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'init_beg': datetime.datetime(2018, 12, 31, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 4, 12, 0, 0), + }, + ), + # Test 6 - check computed valid_beg/end + (['FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', + 'DATE_TYPE': 'INIT'}, + {'FCST_INIT_HOUR': '0', 'FCST_LEAD': '12,24'}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'valid_beg': datetime.datetime(2019, 1, 1, 12, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 6, 0, 0, 0), + }, + ), ] ) @pytest.mark.wrapper_d diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 0c79a13135..e3655290d2 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -508,6 +508,11 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict[f'FCST_{date_type}_HOUR'], config_dict[f'OBS_{date_type}_HOUR']) + # handle opposite of date_type VALID 
if INIT and vice versa + self._set_strinsub_other(stringsub_dict, date_type.lower(), + config_dict['FCST_LEAD'], + config_dict['OBS_LEAD']) + # Set loop information for loop_or_group_list in lists_to_loop + lists_to_group: list_name = loop_or_group_list.replace('_LIST', '') @@ -522,7 +527,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict['obtype'] = ( config_dict['OBTYPE'].replace('"', '').replace(' ', '') ) - # TODO: HOUR can be used for both loop and group + elif 'HOUR' in list_name: # TODO: should this only handle opposite of date_type? delta_list = self._get_relativedelta_list(config_dict[list_name]) @@ -569,7 +574,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict[generic_list] = ( stringsub_dict[f'{sub_name}_end'] ) - # TODO: LEAD can be used for both loop and group + elif 'LEAD' in list_name: lead_list = self._get_met_time_list(config_dict[list_name]) @@ -621,7 +626,8 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): return stringsub_dict def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): - """! Set string sub dictionary _beg and _end values for fcst and obs. + """! Set string sub dictionary _beg and _end values for fcst and obs + hour lists. Set other values depending on values set in fcst and obs hour lists. Values that are set depend on what it set in c_dict DATE_TYPE, which is either INIT or VALID. 
@@ -630,6 +636,7 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): @param fcst_hour_str string with list of forecast hours to process @param obs_hour_str string with list of observation hours to process """ + # date_type is valid or init depending on LOOP_BY date_type = self.c_dict['DATE_TYPE'].lower() if fcst_hour_str: fcst_hour_list = self._get_relativedelta_list(fcst_hour_str) @@ -644,32 +651,16 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): self._set_stringsub_hours_item(sub_dict, 'fcst', fcst_hour_list) self._set_stringsub_hours_item(sub_dict, 'obs', obs_hour_list) - # if fcst and obs hour lists the same or if fcst is set but not obs, - # set {date_type}_beg/end to fcst_{date_type}_beg/end - if (fcst_hour_list == obs_hour_list or - (fcst_hour_list and not obs_hour_list)): - sub_dict[f'{date_type}_beg'] = sub_dict[f'fcst_{date_type}_beg'] - sub_dict[f'{date_type}_end'] = sub_dict[f'fcst_{date_type}_end'] + self._set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, date_type) - # if {date_type} beg and end are the same, set {date_type} - if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']: - sub_dict[date_type] = sub_dict[f'{date_type}_end'] - - # if fcst hr list is set but obs hr list is not, - # set {date_type}_beg/end to obs_{date_type}_beg/end - elif not fcst_hour_list and obs_hour_list: - sub_dict[f'{date_type}_beg'] = sub_dict[f'obs_{date_type}_beg'] - sub_dict[f'{date_type}_end'] = sub_dict[f'obs_{date_type}_end'] - - # if {date_type} beg and end are the same, set {date_type} - if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']: - sub_dict[date_type] = sub_dict[f'{date_type}_beg'] # if neither fcst or obs hr list are set, # {date_type}_beg/end and {date_type} are not set at all (empty string) def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): """! Set either fcst or obs values in string sub dictionary. 
+ Values that are set depend on what it set in c_dict DATE_TYPE, which + is either INIT or VALID. @param sub_dict dictionary to set string sub values @param fcst_or_obs string to note processing either fcst or obs @@ -705,6 +696,75 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): ) ) + def _set_stringsub_generic(self, sub_dict, fcst_hour_list, obs_hour_list, + date_type): + # if fcst and obs hour lists the same or if fcst is set but not obs, + # set {date_type}_beg/end to fcst_{date_type}_beg/end + if (fcst_hour_list == obs_hour_list or + (fcst_hour_list and not obs_hour_list)): + sub_dict[f'{date_type}_beg'] = sub_dict[f'fcst_{date_type}_beg'] + sub_dict[f'{date_type}_end'] = sub_dict[f'fcst_{date_type}_end'] + + # if fcst hr list is set but obs hr list is not, + # set {date_type}_beg/end to obs_{date_type}_beg/end + elif not fcst_hour_list and obs_hour_list: + sub_dict[f'{date_type}_beg'] = sub_dict[f'obs_{date_type}_beg'] + sub_dict[f'{date_type}_end'] = sub_dict[f'obs_{date_type}_end'] + + # if {date_type} beg and end are the same, set {date_type} + if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']: + sub_dict[date_type] = sub_dict[f'{date_type}_beg'] + + def _set_strinsub_other(self, sub_dict, date_type, fcst_lead_str, + obs_lead_str): + if fcst_lead_str: + fcst_lead_list = self._get_relativedelta_list(fcst_lead_str) + else: + fcst_lead_list = None + + if obs_lead_str: + obs_lead_list = self._get_relativedelta_list(obs_lead_str) + else: + obs_lead_list = None + + other_type = 'valid' if date_type == 'init' else 'init' + self._set_strinsub_other_item(sub_dict, date_type, 'fcst', + fcst_lead_list) + self._set_strinsub_other_item(sub_dict, date_type, 'obs', + obs_lead_list) + self._set_stringsub_generic(sub_dict, fcst_lead_list, obs_lead_list, + other_type) + + def _set_strinsub_other_item(self, sub_dict, date_type, fcst_or_obs, + hour_list): + """! 
Compute other type's begin and end values using the beg/end and + min/max forecast leads. For example, if date_type is init, compute + valid_beg using init_beg with min lead and compute valid_end using + init_end with max lead. + + """ + other_type = 'valid' if date_type == 'init' else 'init' + date_prefix = f'{fcst_or_obs}_{date_type}' + other_prefix = f'{fcst_or_obs}_{other_type}' + if not hour_list: + sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] + sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] + return + + min_lead = hour_list[0] + max_lead = hour_list[-1] + # else: + # min_lead = relativedelta() + # max_lead = relativedelta() + + if date_type == 'init': + sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] + min_lead + sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] + max_lead + else: + sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] - max_lead + sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] - min_lead + + def get_output_filename(self, output_type, filename_template, filename_type, lists_to_loop, lists_to_group, config_dict): From fd89fcb02351a3772832951c3148de7d14c07414 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 5 Oct 2022 15:08:46 -0600 Subject: [PATCH 35/92] fixed logic that was using date beg + 23:59:59 for the end time if no hours are set instead of using date END + 23:59:59, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index e3655290d2..74020aee02 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -684,7 +684,6 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): # if fcst hour list is not set, use date beg 000000-235959 as # fcst_{date_type}_beg/end 
- # TODO: should be date beg 000000 and date end 235959? sub_dict[f'{prefix}_beg'] = ( datetime.datetime.strptime( date_beg + '000000', '%Y%m%d%H%M%S' @@ -692,7 +691,7 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): ) sub_dict[f'{prefix}_end'] = ( datetime.datetime.strptime( - date_beg + '235959', '%Y%m%d%H%M%S' + date_end + '235959', '%Y%m%d%H%M%S' ) ) @@ -753,9 +752,6 @@ def _set_strinsub_other_item(self, sub_dict, date_type, fcst_or_obs, min_lead = hour_list[0] max_lead = hour_list[-1] - # else: - # min_lead = relativedelta() - # max_lead = relativedelta() if date_type == 'init': sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] + min_lead @@ -764,7 +760,6 @@ def _set_strinsub_other_item(self, sub_dict, date_type, fcst_or_obs, sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] - max_lead sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] - min_lead - def get_output_filename(self, output_type, filename_template, filename_type, lists_to_loop, lists_to_group, config_dict): From 32ec575538324dae315a5430627f795a03af84c6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 10 Oct 2022 12:57:50 -0600 Subject: [PATCH 36/92] cleanup and document new logic --- metplus/wrappers/stat_analysis_wrapper.py | 208 +++++++++++++--------- 1 file changed, 121 insertions(+), 87 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 74020aee02..2298d37b35 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -180,6 +180,7 @@ def create_c_dict(self): self.config.getraw('config', 'STAT_ANALYSIS_OUTPUT_TEMPLATE', '') ) + # set date type, which is typically controlled by LOOP_BY c_dict['DATE_TYPE'] = self.config.getstr('config', 'DATE_TYPE', self.config.getstr('config', @@ -195,27 +196,17 @@ def create_c_dict(self): c_dict['DATE_END'] = 
end_dt.strftime('%Y%m%d') # read jobs from STAT_ANALYSIS_JOB or legacy JOB_NAME/ARGS if unset - c_dict['JOBS'] = [] - job_indices = list( - find_indices_in_config_section(r'STAT_ANALYSIS_JOB(\d+)$', - self.config, - index_index=1).keys() - ) - - if job_indices: - for j_id in job_indices: - job = self.config.getraw('config', f'STAT_ANALYSIS_JOB{j_id}') - c_dict['JOBS'].append(job) - else: - job_name = self.config.getraw('config', 'STAT_ANALYSIS_JOB_NAME') - job_args = self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS') - c_dict['JOBS'].append(f'-job {job_name} {job_args}') + c_dict['JOBS'] = self._read_jobs_from_config() # read all lists and check if field lists are all empty - c_dict['all_field_lists_empty'] = self.read_lists_from_config(c_dict) + all_field_lists_empty = self.read_lists_from_config(c_dict) + + # read any [FCST/OBS]_VAR_* variables if they are set c_dict['VAR_LIST'] = parse_var_list(self.config) c_dict['MODEL_INFO_LIST'] = self.parse_model_info() + + # if MODEL_LIST was not set, populate it from the model info list if not c_dict['MODEL_LIST'] and c_dict['MODEL_INFO_LIST']: self.logger.warning("MODEL_LIST was left blank, " + "creating with MODELn information.") @@ -224,13 +215,33 @@ def create_c_dict(self): c_dict = self.set_lists_loop_or_group(c_dict) + # read MET config settings that will apply to every run self.add_met_config(name='hss_ec_value', data_type='float', metplus_configs=['STAT_ANALYSIS_HSS_EC_VALUE']) - return self.c_dict_error_check(c_dict) + return self.c_dict_error_check(c_dict, all_field_lists_empty) - def c_dict_error_check(self, c_dict): + def _read_jobs_from_config(self): + jobs = [] + job_indices = list( + find_indices_in_config_section(r'STAT_ANALYSIS_JOB(\d+)$', + self.config, + index_index=1).keys() + ) + + if job_indices: + for j_id in job_indices: + job = self.config.getraw('config', f'STAT_ANALYSIS_JOB{j_id}') + jobs.append(job) + else: + job_name = self.config.getraw('config', 'STAT_ANALYSIS_JOB_NAME') + job_args = 
self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS') + jobs.append(f'-job {job_name} {job_args}') + + return jobs + + def c_dict_error_check(self, c_dict, all_field_lists_empty): if not c_dict.get('CONFIG_FILE'): if len(c_dict['JOBS']) > 1: @@ -265,14 +276,13 @@ def c_dict_error_check(self, c_dict): self.log_error("DATE_TYPE must be VALID or INIT") # if var list is set and field lists are not all empty, error - if c_dict['VAR_LIST'] and not c_dict['all_field_lists_empty']: + if c_dict['VAR_LIST'] and not all_field_lists_empty: self.log_error("Field information defined in both " "[FCST/OBS]_VAR_LIST and " "[FCST/OBS]_VAR_[NAME/LEVELS]. Use " "one or the other formats to run") - # if MODEL_LIST was not set in config, populate it from the model info list - # if model info list is also not set, report and error + # if model list and info list were not set, report and error if not c_dict['MODEL_LIST'] and not c_dict['MODEL_INFO_LIST']: self.log_error("No model information was found.") @@ -396,7 +406,7 @@ def _get_met_time_list(string_value, sort_list=True): sort_list=sort_list) @staticmethod - def _get_relativedelta_list(string_value, sort_list=True): + def _get_delta_list(string_value, sort_list=True): return StatAnalysisWrapper._format_time_list(string_value, get_met_format=False, sort_list=sort_list) @@ -406,44 +416,35 @@ def set_lists_loop_or_group(self, c_dict): should treat the items in that list as a group or items to be looped over based on user settings, the values in the list, and process being run. 
- - Args: - @param group_items list of the METplus config list - names to group the list's items set by user - @param loop_items list of the METplus config list - names to loop over the list's items set by user - @param config_dict dictionary containing the - configuration information - + + @param c_dict dictionary containing the configuration information + @returns tuple containing lists_to_group_items ( list of all the list names whose items are being grouped together) and lists_to_loop_items (list of all the list names whose items are being looped over) """ - # get list of config variables not found in either - # GROUP_LIST_ITEMS or LOOP_LIST_ITEMS + # get list of list variables not found in group or loop lists missing_config_list = [conf for conf in self.EXPECTED_CONFIG_LISTS - if conf not in c_dict['GROUP_LIST_ITEMS']] - missing_config_list = [conf for conf in missing_config_list - if conf not in c_dict['LOOP_LIST_ITEMS']] - found_config_list = [conf for conf in self.EXPECTED_CONFIG_LISTS - if conf not in missing_config_list] + if conf not in c_dict['GROUP_LIST_ITEMS'] + and conf not in c_dict['LOOP_LIST_ITEMS']] - # loop through lists not found in either loop or group lists # add missing lists to group_lists for missing_config in missing_config_list: c_dict['GROUP_LIST_ITEMS'].append(missing_config) - # loop through lists found in either loop or group lists originally - for found_config in found_config_list: - # if list is empty and in loop list, warn and move to group list - if (not c_dict[found_config] and - found_config in c_dict['LOOP_LIST_ITEMS']): - self.logger.warning(found_config + " is empty, " - + "will be treated as group.") - c_dict['GROUP_LIST_ITEMS'].append(found_config) - c_dict['LOOP_LIST_ITEMS'].remove(found_config) + # move empty lists in loop lists to group lists + for list_name in c_dict['LOOP_LIST_ITEMS']: + # skip if list has values + if c_dict[list_name]: + continue + + self.logger.warning(f'{list_name} was found in LOOP_LIST_ITEMS' + ' 
but is empty. Moving to group list') + c_dict['GROUP_LIST_ITEMS'].append(list_name) + c_dict['LOOP_LIST_ITEMS'].remove(list_name) + # log summary of group and loop lists self.logger.debug("Items in these lists will be grouped together: " + ', '.join(c_dict['GROUP_LIST_ITEMS'])) self.logger.debug("Items in these lists will be looped over: " @@ -459,7 +460,7 @@ def format_thresh(thresh_str): Can be a comma-separated list, i.e. gt3,<=5.5, ==7 @returns string of comma-separated list of the threshold(s) with - letter format, i.e. gt3, le5.5, eq7 + letter format, i.e. gt3,le5.5,eq7 """ formatted_thresh_list = [] # separate thresholds by comma and strip off whitespace around values @@ -478,18 +479,12 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): """! Build a dictionary with list names, dates, and commonly used identifiers to pass to string_template_substitution. - Args: - lists_to_loop - list of all the list names whose items - are being grouped together - lists_to group - list of all the list names whose items - are being looped over - config_dict - dictionary containing the configuration - information - - Returns: - stringsub_dict - dictionary containing the formatted - information to pass to the - string_template_substitution + @param lists_to_loop list of all the list names whose items + are being grouped together + @param lists_to_group list of all the list names whose items + are being looped over + @param config_dict dictionary containing the configuration information + @returns dictionary with the formatted info to pass to do_string_sub """ date_type = self.c_dict['DATE_TYPE'] @@ -530,7 +525,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): elif 'HOUR' in list_name: # TODO: should this only handle opposite of date_type? 
- delta_list = self._get_relativedelta_list(config_dict[list_name]) + delta_list = self._get_delta_list(config_dict[list_name]) if not delta_list: stringsub_dict[sub_name] = list_name_value # TODO: should this be set to 0:0:0 to 23:59:59? @@ -591,7 +586,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict[sub_name] = lead_list[0] - lead_rd = self._get_relativedelta_list(config_dict[list_name])[0] + lead_rd = self._get_delta_list(config_dict[list_name])[0] total_sec = ti_get_seconds_from_relativedelta(lead_rd) stringsub_dict[sub_name+'_totalsec'] = str(total_sec) @@ -630,7 +625,8 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): hour lists. Set other values depending on values set in fcst and obs hour lists. Values that are set depend on what it set in c_dict DATE_TYPE, which - is either INIT or VALID. + is either INIT or VALID. If neither fcst or obs hr list are set, + {date_type}_beg/end and {date_type} are not set at all (empty string). 
@param sub_dict dictionary to set string sub values @param fcst_hour_str string with list of forecast hours to process @@ -639,28 +635,28 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): # date_type is valid or init depending on LOOP_BY date_type = self.c_dict['DATE_TYPE'].lower() if fcst_hour_str: - fcst_hour_list = self._get_relativedelta_list(fcst_hour_str) + fcst_hour_list = self._get_delta_list(fcst_hour_str) else: fcst_hour_list = None if obs_hour_str: - obs_hour_list = self._get_relativedelta_list(obs_hour_str) + obs_hour_list = self._get_delta_list(obs_hour_str) else: obs_hour_list = None self._set_stringsub_hours_item(sub_dict, 'fcst', fcst_hour_list) self._set_stringsub_hours_item(sub_dict, 'obs', obs_hour_list) - self._set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, date_type) - - - # if neither fcst or obs hr list are set, - # {date_type}_beg/end and {date_type} are not set at all (empty string) + self._set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, + date_type) def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): - """! Set either fcst or obs values in string sub dictionary. + """! Set either fcst or obs values in string sub dictionary, e.g. + [fcst/obs]_[init/valid]_[beg/end]. Values that are set depend on what it set in c_dict DATE_TYPE, which - is either INIT or VALID. + is either INIT or VALID. If the beg and end values are the same, then + also set the same variable without the _beg/end extension, e.g. if + fcst_valid_beg is equal to fcst_valid_end, also set fcst_valid. @param sub_dict dictionary to set string sub values @param fcst_or_obs string to note processing either fcst or obs @@ -695,8 +691,22 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): ) ) - def _set_stringsub_generic(self, sub_dict, fcst_hour_list, obs_hour_list, + @staticmethod + def _set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, date_type): + """! 
Set [init/valid]_[beg/end] values based on the hour lists that + are provided. + Set {date_type}_[beg/end] to fcst_{date_type}_[beg/end] if + fcst and obs lists are the same or if fcst list is set and obs is not. + Set {date_type}_[beg/end] to obs_{date_type}_[beg/end] if obs list is + set and fcst is not. + Also sets {date_type} if {date_type}_beg and {date_type}_end are equal. + + @param sub_dict dictionary to set string sub values + @param fcst_hour_list list of forecast hours or leads + @param obs_hour_list list of observation hours or leads + @param date_type type of date to process: valid or init + """ # if fcst and obs hour lists the same or if fcst is set but not obs, # set {date_type}_beg/end to fcst_{date_type}_beg/end if (fcst_hour_list == obs_hour_list or @@ -716,31 +726,47 @@ def _set_stringsub_generic(self, sub_dict, fcst_hour_list, obs_hour_list, def _set_strinsub_other(self, sub_dict, date_type, fcst_lead_str, obs_lead_str): + """! Compute beg and end values for opposite of date_type (e.g. valid + if init and vice versa) using min/max forecast leads. 
+ + @param sub_dict dictionary to set string sub values + @param date_type type of date to process: valid or init + @param fcst_lead_str string to parse list of forecast leads + @param obs_lead_str string to parse list of observation leads + """ if fcst_lead_str: - fcst_lead_list = self._get_relativedelta_list(fcst_lead_str) + fcst_lead_list = self._get_delta_list(fcst_lead_str) else: fcst_lead_list = None if obs_lead_str: - obs_lead_list = self._get_relativedelta_list(obs_lead_str) + obs_lead_list = self._get_delta_list(obs_lead_str) else: obs_lead_list = None other_type = 'valid' if date_type == 'init' else 'init' self._set_strinsub_other_item(sub_dict, date_type, 'fcst', - fcst_lead_list) + fcst_lead_list) self._set_strinsub_other_item(sub_dict, date_type, 'obs', - obs_lead_list) + obs_lead_list) self._set_stringsub_generic(sub_dict, fcst_lead_list, obs_lead_list, other_type) - def _set_strinsub_other_item(self, sub_dict, date_type, fcst_or_obs, - hour_list): + @staticmethod + def _set_strinsub_other_item(sub_dict, date_type, fcst_or_obs, hour_list): """! Compute other type's begin and end values using the beg/end and - min/max forecast leads. For example, if date_type is init, compute - valid_beg using init_beg with min lead and compute valid_end using - init_end with max lead. + min/max forecast leads. + If date_type is init, + compute valid_beg by adding init_beg and min lead, + compute valid_end by adding init_end and max lead. + If date_type is valid, + compute init_beg by subtracting max lead from valid_beg, + compute init_end by subtracting min lead from valid_end. 
+ @param sub_dict dictionary to set string sub values + @param date_type type of date to process: valid or init + @param fcst_or_obs string to use to process either fcst or obs + @param hour_list list of forecast leads to use to calculate times """ other_type = 'valid' if date_type == 'init' else 'init' date_prefix = f'{fcst_or_obs}_{date_type}' @@ -754,11 +780,19 @@ def _set_strinsub_other_item(self, sub_dict, date_type, fcst_or_obs, max_lead = hour_list[-1] if date_type == 'init': - sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] + min_lead - sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] + max_lead + sub_dict[f'{other_prefix}_beg'] = ( + sub_dict[f'{date_prefix}_beg'] + min_lead + ) + sub_dict[f'{other_prefix}_end'] = ( + sub_dict[f'{date_prefix}_end'] + max_lead + ) else: - sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] - max_lead - sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] - min_lead + sub_dict[f'{other_prefix}_beg'] = ( + sub_dict[f'{date_prefix}_beg'] - max_lead + ) + sub_dict[f'{other_prefix}_end'] = ( + sub_dict[f'{date_prefix}_end'] - min_lead + ) def get_output_filename(self, output_type, filename_template, filename_type, From 3a0ac73c39431141ef5b40969fab04acd10dd573 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 10 Oct 2022 16:05:01 -0600 Subject: [PATCH 37/92] refactored logic to remove unnecessary arguments, changed logic that computes all combinations of runtime settings so that it is understandable by other developers, added documentation --- .../stat_analysis/test_stat_analysis.py | 114 ++++++++++-- metplus/wrappers/stat_analysis_wrapper.py | 168 ++++++++++-------- 2 files changed, 187 insertions(+), 95 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index a834b4af87..6a299bf665 100644 --- 
a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -4,6 +4,7 @@ import os import datetime +import pprint from dateutil.relativedelta import relativedelta from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper @@ -13,6 +14,8 @@ TEST_CONF = os.path.join(os.path.dirname(__file__), 'test.conf') +pp = pprint.PrettyPrinter() + def stat_analysis_wrapper(metplus_config): """! Returns a default StatAnalysisWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration @@ -56,6 +59,87 @@ def _set_config_dict_values(): return config_dict +@pytest.mark.parametrize( + 'c_dict, expected_result', [ + # 0 + ({ + 'GROUP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST'], + 'LOOP_LIST_ITEMS': [], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1", "MODEL2"', + 'FCST_LEAD': '0, 24' + } + ] + ), + # 1 + ({ + 'GROUP_LIST_ITEMS': ['FCST_LEAD_LIST'], + 'LOOP_LIST_ITEMS': ['MODEL_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1"', 'FCST_LEAD': '0, 24'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '0, 24'}, + ] + ), + # 2 + ({ + 'GROUP_LIST_ITEMS': [], + 'LOOP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1"', 'FCST_LEAD': '0'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '0'}, + {'MODEL': '"MODEL1"', 'FCST_LEAD': '24'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '24'}, + ] + ), + # 3 + ({ + 'GROUP_LIST_ITEMS': ['DESC_LIST'], + 'LOOP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST', + 'FCST_THRESH_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + 'FCST_THRESH_LIST': ['gt3', 'ge4'], + 'DESC_LIST': ['"ONE_DESC"'], + }, + [ + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL1"'}, + {'DESC': 
'"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL2"'}, + ] + ), + ] +) +@pytest.mark.wrapper_d +def test_get_runtime_settings(metplus_config, c_dict, expected_result): + config = metplus_config() + wrapper = StatAnalysisWrapper(config) + + runtime_settings = wrapper.get_runtime_settings(c_dict) + pp.pprint(runtime_settings) + assert runtime_settings == expected_result + @pytest.mark.parametrize( 'list_name, config_overrides, expected_value', [ ('FCST_LEAD_LIST', {'FCST_LEAD_LIST': '12'}, ['12']), @@ -398,8 +482,9 @@ def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, lists_to_group = [item for item in st.EXPECTED_CONFIG_LISTS if item not in lists_to_loop] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) + config_dict['LISTS_TO_GROUP'] = lists_to_group + config_dict['LISTS_TO_LOOP'] = lists_to_loop + test_stringsub_dict = st.build_stringsub_dict(config_dict) print(test_stringsub_dict) for key, value in expected_values.items(): @@ -444,21 +529,13 @@ def test_get_output_filename(metplus_config, filename_template, output_type, st.c_dict['DATE_BEG'] = '20190101' st.c_dict['DATE_END'] = '20190101' st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 
'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] test_output_filename = st.get_output_filename(output_type, filename_template, filename_type, lists_to_loop, - lists_to_group, config_dict) assert expected_output == test_output_filename @@ -509,30 +586,30 @@ def test_get_lookin_dir(metplus_config): 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST'] lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] + config_dict['LISTS_TO_GROUP'] = lists_to_group + config_dict['LISTS_TO_LOOP'] = lists_to_loop + pytest_data_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'data') # Test 1 expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') dir_path = os.path.join(pytest_data_dir, 'fake/*') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st.get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 2 expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') dir_path = os.path.join(pytest_data_dir, 'fake/{valid?fmt=%Y%m%d}') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st.get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 3 - no matches for lookin dir wildcard expected_lookin_dir = '' dir_path = os.path.join(pytest_data_dir, 'fake/*nothingmatches*') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st.get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 4 - 2 paths, one with wildcard @@ -541,8 +618,7 @@ def test_get_lookin_dir(metplus_config): dir_path = os.path.join(pytest_data_dir, 'fake/*') dir_path = 
f'{dir_path}, {dir_path}' - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st.get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 2298d37b35..420efd6e47 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -475,7 +475,7 @@ def format_thresh(thresh_str): return ','.join(formatted_thresh_list) - def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): + def build_stringsub_dict(self, config_dict): """! Build a dictionary with list names, dates, and commonly used identifiers to pass to string_template_substitution. @@ -490,7 +490,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): stringsub_dict = {} # add all loop list and group list items to string sub keys list - for list_item in lists_to_loop + lists_to_group: + for list_item in self.EXPECTED_CONFIG_LISTS: list_name = list_item.replace('_LIST', '').lower() stringsub_dict[list_name] = '' @@ -509,7 +509,7 @@ def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): config_dict['OBS_LEAD']) # Set loop information - for loop_or_group_list in lists_to_loop + lists_to_group: + for loop_or_group_list in self.EXPECTED_CONFIG_LISTS: list_name = loop_or_group_list.replace('_LIST', '') sub_name = list_name.lower() list_name_value = ( @@ -796,7 +796,7 @@ def _set_strinsub_other_item(sub_dict, date_type, fcst_or_obs, hour_list): def get_output_filename(self, output_type, filename_template, filename_type, - lists_to_loop, lists_to_group, config_dict): + lists_to_loop,config_dict): """! Create a file name for stat_analysis output. 
Args: @@ -810,8 +810,6 @@ def get_output_filename(self, output_type, filename_template, default or user lists_to_loop - list of all the list names whose items are being grouped together - lists_to group - list of all the list names whose - items are being looped over config_dict - dictionary containing the configuration information @@ -823,8 +821,7 @@ def get_output_filename(self, output_type, filename_template, date_end = self.c_dict['DATE_END'] date_type = self.c_dict['DATE_TYPE'] - stringsub_dict = self.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) + stringsub_dict = self.build_stringsub_dict(config_dict) if filename_type == 'default': @@ -862,7 +859,7 @@ def get_output_filename(self, output_type, filename_template, **stringsub_dict) return output_filename - def get_lookin_dir(self, dir_path, lists_to_loop, lists_to_group, config_dict): + def get_lookin_dir(self, dir_path, config_dict): """!Fill in necessary information to get the path to the lookin directory to pass to stat_analysis. 
@@ -880,9 +877,7 @@ def get_lookin_dir(self, dir_path, lists_to_loop, lists_to_group, config_dict): lookin_dir - string of the filled directory from dir_path """ - stringsub_dict = self.build_stringsub_dict(lists_to_loop, - lists_to_group, - config_dict) + stringsub_dict = self.build_stringsub_dict(config_dict) dir_path_filled = do_string_sub(dir_path, **stringsub_dict) @@ -1159,7 +1154,7 @@ def get_level_list(self, data_type): return [f'"{item}"' for item in level_list] def process_job_args(self, job_type, job, model_info, - lists_to_loop_items, lists_to_group_items, runtime_settings_dict): + lists_to_loop_items, runtime_settings_dict): output_template = ( model_info[f'{job_type}_filename_template'] @@ -1173,7 +1168,6 @@ def process_job_args(self, job_type, job, model_info, output_template, filename_type, lists_to_loop_items, - lists_to_group_items, runtime_settings_dict) ) output_file = os.path.join(self.c_dict['OUTPUT_DIR'], @@ -1188,7 +1182,7 @@ def process_job_args(self, job_type, job, model_info, return job - def get_runtime_settings_dict_list(self): + def get_all_runtime_settings(self): runtime_settings_dict_list = [] c_dict_list = self.get_c_dict_list() for c_dict in c_dict_list: @@ -1199,20 +1193,16 @@ def get_runtime_settings_dict_list(self): formatted_runtime_settings_dict_list = [] for runtime_settings_dict in runtime_settings_dict_list: loop_lists = c_dict['LOOP_LIST_ITEMS'] - group_lists = c_dict['GROUP_LIST_ITEMS'] # Set up stat_analysis -lookin argument, model and obs information # and stat_analysis job. 
- model_info = self.get_model_obtype_and_lookindir(runtime_settings_dict, - loop_lists, - group_lists, - ) + model_info = self.get_model_obtype_and_lookindir(runtime_settings_dict) if model_info is None: return None runtime_settings_dict['JOBS'] = ( self.get_job_info(model_info, runtime_settings_dict, - loop_lists, group_lists) + loop_lists) ) # get -out argument if set @@ -1222,7 +1212,6 @@ def get_runtime_settings_dict_list(self): self.c_dict['OUTPUT_TEMPLATE'], 'user', loop_lists, - group_lists, runtime_settings_dict) ) output_file = os.path.join(self.c_dict['OUTPUT_DIR'], @@ -1241,63 +1230,74 @@ def get_runtime_settings_dict_list(self): return formatted_runtime_settings_dict_list def get_runtime_settings(self, c_dict): + """! Build list of all combinations of runtime settings that should be + run. Combine all group lists into a single item separated by comma. + Compute the cartesian product to get all of the different combinations + of the loop lists to create the final list of settings to run. + + @param c_dict dictionary containing [GROUP/LOOP]_LIST_ITEMS that + contain list names to group or loop, as well the actual lists which + are named the same as the values in the [GROUP/LOOP]_LIST_ITEMS but + with the _LIST extension removed. + @returns list of dictionaries that contain all of the settings to use + for a given run. + """ + runtime_setup_dict = {} - # Parse whether all expected METplus config _LIST variables - # to be treated as a loop or group. - group_lists = c_dict['GROUP_LIST_ITEMS'] - loop_lists = c_dict['LOOP_LIST_ITEMS'] + # for group items, set the value to a list with a single item that is + # a string of all items separated by a comma + for group_list in c_dict['GROUP_LIST_ITEMS']: + key = group_list.replace('_LIST', '') + runtime_setup_dict[key] = [', '.join(c_dict[group_list])] - runtime_setup_dict = {} - # Fill setup dictionary for MET config variable name - # and its value as a string for group lists. 
- for group_list in group_lists: - runtime_setup_dict_name = group_list.replace('_LIST', '') - runtime_setup_dict[runtime_setup_dict_name] = [ - ', '.join(c_dict[group_list]) - ] - - # Fill setup dictionary for MET config variable name - # and its value as a list for loop lists. - - for loop_list in loop_lists: - runtime_setup_dict_name = loop_list.replace('_LIST', '') - runtime_setup_dict[runtime_setup_dict_name] = ( - c_dict[loop_list] - ) + # for loop items, pass the list directly as the value + for loop_list in c_dict['LOOP_LIST_ITEMS']: + key = loop_list.replace('_LIST', '') + runtime_setup_dict[key] = c_dict[loop_list] - # Create run time dictionary with all the combinations - # of settings to be run. + # Create a dict with all the combinations of settings to be run runtime_setup_dict_names = sorted(runtime_setup_dict) - runtime_settings_dict_list = ( - [dict(zip(runtime_setup_dict_names, prod)) for prod in - itertools.product(*(runtime_setup_dict[name] for name in - runtime_setup_dict_names))] - ) - return runtime_settings_dict_list + runtime_settings_dict_list = [] - def get_field_units(self, index): - """! 
Get units of fcst and obs fields if set based on VAR index - @params index VAR index corresponding to other [FCST/OBS] info - @returns tuple containing forecast and observation units respectively - """ - fcst_units = self.config.getstr('config', - f'FCST_VAR{index}_UNITS', - '') - obs_units = self.config.getstr('config', - f'OBS_VAR{index}_UNITS', - '') - if not obs_units and fcst_units: - obs_units = fcst_units - if not fcst_units and obs_units: - fcst_units = obs_units + # find cartesian product (all combos of the lists) of each dict key + products = itertools.product( + *(runtime_setup_dict[name] for name in runtime_setup_dict_names) + ) + for product in products: + # pair up product values with dict keys and add them to new dict + next_dict = {} + for key, value in zip(runtime_setup_dict_names, product): + next_dict[key] = value + runtime_settings_dict_list.append(next_dict) + + # NOTE: Logic to create list of runtime settings was previously + # handled using complex list comprehension that was difficult to + # read. New logic was intended to be more readable by other developers. + # Original code is commented below for reference: + # runtime_settings_dict_list = [ + # dict(zip(runtime_setup_dict_names, prod)) for prod in + # itertools.product(*(runtime_setup_dict[name] for name in + # runtime_setup_dict_names)) + # ] - return fcst_units, obs_units + return runtime_settings_dict_list def get_c_dict_list(self): + """! Build list of config dictionaries for each field + name/level/threshold specified by the [FCST/OBS]_VAR_* config vars. + If field information was specified in the field lists + [FCST_OBS]_[VAR/UNITS/THRESH/LEVEL]_LIST instead of these + variables, then return a list with a single item that is a deep copy + of the self.c_dict. 
+ + @returns list of dictionaries for each field to process + """ # if fields were not specified with [FCST/OBS]_VAR_* variables # return and array with only self.c_dict if not self.c_dict['VAR_LIST']: + c_dict = {} + self.add_other_lists_to_c_dict(c_dict) return [copy.deepcopy(self.c_dict)] # otherwise, use field information to build lists with single items @@ -1305,17 +1305,17 @@ def get_c_dict_list(self): var_info_list = self.c_dict['VAR_LIST'] c_dict_list = [] for var_info in var_info_list: - fcst_units, obs_units = self.get_field_units(var_info['index']) + fcst_units, obs_units = self._get_field_units(var_info['index']) run_fourier = ( self.config.getbool('config', - 'VAR' + var_info['index'] + '_FOURIER_DECOMP', + f"VAR{var_info['index']}_FOURIER_DECOMP", False) ) if run_fourier: fourier_wave_num_pairs = getlist( self.config.getstr('config', - 'VAR' + var_info['index'] + '_WAVE_NUM_LIST', + f"VAR{var_info['index']}_WAVE_NUM_LIST", '') ) else: @@ -1379,6 +1379,25 @@ def get_c_dict_list(self): return c_dict_list + def _get_field_units(self, index): + """! Get units of fcst and obs fields if set based on VAR index + + @param index VAR index corresponding to other [FCST/OBS] info + @returns tuple containing forecast and observation units respectively + """ + fcst_units = self.config.getstr('config', + f'FCST_VAR{index}_UNITS', + '') + obs_units = self.config.getstr('config', + f'OBS_VAR{index}_UNITS', + '') + if not obs_units and fcst_units: + obs_units = fcst_units + if not fcst_units and obs_units: + fcst_units = obs_units + + return fcst_units, obs_units + def add_other_lists_to_c_dict(self, c_dict): """! Using GROUP_LIST_ITEMS and LOOP_LIST_ITEMS, add lists from self.c_dict that are not already in c_dict. 
@@ -1394,7 +1413,7 @@ def add_other_lists_to_c_dict(self, c_dict): if list_item not in c_dict: c_dict[list_item] = self.c_dict[list_item] - def get_model_obtype_and_lookindir(self, runtime_settings_dict, loop_lists, group_lists): + def get_model_obtype_and_lookindir(self, runtime_settings_dict): """! Reads through model info dictionaries for given run. Sets lookindir command line argument. Sets MODEL and OBTYPE values in runtime setting dictionary. @param runtime_settings_dict dictionary containing all settings used in next run @@ -1421,8 +1440,6 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict, loop_lists, grou runtime_settings_dict['OBTYPE'] = '"'+model_info['obtype']+'"' lookin_dirs.append(self.get_lookin_dir(model_info['dir'], - loop_lists, - group_lists, runtime_settings_dict, ) ) @@ -1447,7 +1464,7 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict, loop_lists, grou # return last model info dict used return model_info - def get_job_info(self, model_info, runtime_settings_dict, loop_lists, group_lists): + def get_job_info(self, model_info, runtime_settings_dict, loop_lists): """! Get job information and concatenate values into a string @params model_info model information to use to determine output file paths @params runtime_settings_dict dictionary containing all settings used in next run @@ -1461,7 +1478,6 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists, group_list job, model_info, loop_lists, - group_lists, runtime_settings_dict, ) @@ -1474,7 +1490,7 @@ def run_stat_analysis(self): or initialization dates for a job defined by the user. 
""" - runtime_settings_dict_list = self.get_runtime_settings_dict_list() + runtime_settings_dict_list = self.get_all_runtime_settings() if not runtime_settings_dict_list: self.log_error('Could not get runtime settings dict list') return False From 8eaf348c55adabacc1dbbe6724d7daee7cdf32cb Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 10 Oct 2022 16:06:03 -0600 Subject: [PATCH 38/92] changed logic to call function that copies over relevant config list settings instead of performing a deep copy of self.c_dict since many of those settings are not needed for runtime settings, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 420efd6e47..d350d58cef 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1298,7 +1298,7 @@ def get_c_dict_list(self): if not self.c_dict['VAR_LIST']: c_dict = {} self.add_other_lists_to_c_dict(c_dict) - return [copy.deepcopy(self.c_dict)] + return [c_dict] # otherwise, use field information to build lists with single items # make individual dictionaries for each threshold From 77f029cd7840449d11c3a64d750722e0307c9225 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 10 Oct 2022 16:36:46 -0600 Subject: [PATCH 39/92] more cleanup --- metplus/wrappers/stat_analysis_wrapper.py | 68 +++++++++++------------ 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index d350d58cef..72827df278 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1319,8 +1319,6 @@ def get_c_dict_list(self): '') ) else: - # if not running fourier, use a list - # containing an empty string to loop one 
iteration fourier_wave_num_pairs = [''] # if no thresholds were specified, use a list @@ -1335,23 +1333,17 @@ def get_c_dict_list(self): for fcst_thresh, obs_thresh in zip(fcst_thresholds, obs_thresholds): for pair in fourier_wave_num_pairs: - c_dict = {} - c_dict['index'] = var_info['index'] - c_dict['FCST_VAR_LIST'] = [ - f'"{var_info["fcst_name"]}"' - ] - c_dict['OBS_VAR_LIST'] = [ - f'"{var_info["obs_name"]}"' - ] - c_dict['FCST_LEVEL_LIST'] = [ - f'"{var_info["fcst_level"]}"' - ] - c_dict['OBS_LEVEL_LIST'] = [ - f'"{var_info["obs_level"]}"' - ] - - c_dict['FCST_THRESH_LIST'] = [] - c_dict['OBS_THRESH_LIST'] = [] + c_dict = { + 'index': var_info['index'], + 'FCST_VAR_LIST': [f'"{var_info["fcst_name"]}"'], + 'OBS_VAR_LIST': [f'"{var_info["obs_name"]}"'], + 'FCST_LEVEL_LIST': [f'"{var_info["fcst_level"]}"'], + 'OBS_LEVEL_LIST': [f'"{var_info["obs_level"]}"'], + 'FCST_THRESH_LIST': [], 'OBS_THRESH_LIST': [], + 'FCST_UNITS_LIST': [], 'OBS_UNITS_LIST': [], + 'INTERP_MTHD_LIST': [], + } + if fcst_thresh: thresh_formatted = self.format_thresh(fcst_thresh) c_dict['FCST_THRESH_LIST'].append(thresh_formatted) @@ -1360,8 +1352,6 @@ def get_c_dict_list(self): thresh_formatted = self.format_thresh(obs_thresh) c_dict['OBS_THRESH_LIST'].append(thresh_formatted) - c_dict['FCST_UNITS_LIST'] = [] - c_dict['OBS_UNITS_LIST'] = [] if fcst_units: c_dict['FCST_UNITS_LIST'].append(f'"{fcst_units}"') if obs_units: @@ -1370,8 +1360,6 @@ def get_c_dict_list(self): c_dict['run_fourier'] = run_fourier if pair: c_dict['INTERP_MTHD_LIST'] = ['WV1_' + pair] - else: - c_dict['INTERP_MTHD_LIST'] = [] self.add_other_lists_to_c_dict(c_dict) @@ -1414,10 +1402,12 @@ def add_other_lists_to_c_dict(self, c_dict): c_dict[list_item] = self.c_dict[list_item] def get_model_obtype_and_lookindir(self, runtime_settings_dict): - """! Reads through model info dictionaries for given run. Sets lookindir command line - argument. Sets MODEL and OBTYPE values in runtime setting dictionary. 
- @param runtime_settings_dict dictionary containing all settings used in next run - @returns last model info dictionary is successful, None if not. + """! Reads through model info dictionaries for given run. + Sets lookindir command line argument. Sets MODEL and OBTYPE values in + runtime setting dictionary. + + @param runtime_settings_dict dictionary with all settings used in run + @returns last model info dictionary is successful, None if not. """ lookin_dirs = [] model_list = [] @@ -1425,7 +1415,10 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): obtype_list = [] dump_row_filename_list = [] # get list of models to process - models_to_run = [model.strip().replace('"', '') for model in runtime_settings_dict['MODEL'].split(',')] + models_to_run = [ + model.strip().replace('"', '') + for model in runtime_settings_dict['MODEL'].split(',') + ] for model_info in self.c_dict['MODEL_INFO_LIST']: # skip model if not in list of models to process if model_info['name'] not in models_to_run: @@ -1434,15 +1427,16 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): model_list.append(model_info['name']) reference_list.append(model_info['reference_name']) obtype_list.append(model_info['obtype']) - dump_row_filename_list.append(model_info['dump_row_filename_template']) + dump_row_filename_list.append( + model_info['dump_row_filename_template'] + ) # set MODEL and OBTYPE to single item to find lookin dir - runtime_settings_dict['MODEL'] = '"'+model_info['name']+'"' - runtime_settings_dict['OBTYPE'] = '"'+model_info['obtype']+'"' + runtime_settings_dict['MODEL'] = f'"{model_info["name"]}"' + runtime_settings_dict['OBTYPE'] = f'"{model_info["obtype"]}"' - lookin_dirs.append(self.get_lookin_dir(model_info['dir'], - runtime_settings_dict, - ) - ) + lookin_dirs.append( + self.get_lookin_dir(model_info['dir'], runtime_settings_dict) + ) # set lookin dir command line argument runtime_settings_dict['LOOKIN_DIR'] = ' '.join(lookin_dirs) @@ -1458,7 +1452,9 
@@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): # set values in runtime settings dict for model and obtype runtime_settings_dict['MODEL'] = self.list_to_str(model_list) - runtime_settings_dict['MODEL_REFERENCE_NAME'] = self.list_to_str(reference_list) + runtime_settings_dict['MODEL_REFERENCE_NAME'] = ( + self.list_to_str(reference_list) + ) runtime_settings_dict['OBTYPE'] = self.list_to_str(obtype_list) # return last model info dict used From da6c63826133930428b263c2d03c759c580eac43 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 12:21:02 -0600 Subject: [PATCH 40/92] added info specific to StatAnalysis wrapper to documentation --- docs/Users_Guide/wrappers.rst | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 2ddb4feb10..7c2f2cf6de 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6746,6 +6746,39 @@ The StatAnalysis wrapper encapsulates the behavior of the MET stat_analysis tool. It provides the infrastructure to summarize and filter the MET .stat files. +This wrapper is configured differently than many of the other wrappers that +loop over multiple run times. The StatAnalysis wrapper is designed to process +a range of run times at once using filtering to subset what is processed. +The VALID_BEG and VALID_END or INIT_BEG and INIT_END variables are used to +calculate filtering criteria. +The LEAD_SEQ variable that typically defines a list of forecast leads to +process is not used by the wrapper. Instead the FCST_LEAD_LIST and +OBS_LEAD_LIST are used to filter out forecast leads from the data. + +There are many configuration variables that end with \_LIST that control +settings in the STATAnalysisConfig_wrapped file. +For example, MODEL_LIST controls the model variable in the MET config file and +FCST_LEAD_LIST controls the fcst_lead variable. 
The value for each of these +\_LIST variables can be a list of values separated by comma. +The value of GROUP_LIST_ITEMS is a comma-separated list of \_LIST variable +names that will be grouped together for each call to stat_analysis. +The value of LOOP_LIST_ITEMS is a comma-separated list of \_LIST variable +names that will be looped over to create multiple calls to stat_analysis. +The tool will be called with every combination of the LOOP_LIST_ITEMS +list values. List variables that are not included in either GROUP_LIST_ITEMS +or LOOP_LIST_ITEMS will be automatically added to GROUP_LIST_ITEMS. Lists +defined in LOOP_LIST_ITEMS that are empty lists will be automatically moved +to GROUP_LIST_ITEMS. + +Output files: -dump_row, -out_stat, and -out + +Config file optional + +New in v5.0.0: Multiple jobs + +New in v5.0.0: Looping over groups of list items + + METplus Configuration --------------------- From 8dc34e3b918e9bc0c0483ad10eeb12b766cb2941 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 12:21:26 -0600 Subject: [PATCH 41/92] removed LOOP_ORDER and LEAD_SEQ from basic use case because they are not used --- .../use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf | 4 ---- 1 file changed, 4 deletions(-) diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf index cbcc88d7a8..14eb180192 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf @@ -31,10 +31,6 @@ VALID_BEG=2005080700 VALID_END=2005080700 VALID_INCREMENT = 12H -LEAD_SEQ = 12 - -LOOP_ORDER = times - ### # File I/O From 42babcbab4726a4ceaf6ceb65c3637a079471f80 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 12:21:37 -0600 Subject: [PATCH 42/92] clean up formatting --- 
metplus/wrappers/stat_analysis_wrapper.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 72827df278..e8a785ac3b 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1,6 +1,6 @@ ''' Program Name: stat_analysis_wrapper.py -Contact(s): Mallory Row +Contact(s): Mallory Row, George McCabe Abstract: Runs stat_analysis History Log: Fourth version Usage: stat_analysis_wrapper.py @@ -402,14 +402,14 @@ def _format_time_list(string_value, get_met_format, sort_list=True): @staticmethod def _get_met_time_list(string_value, sort_list=True): return StatAnalysisWrapper._format_time_list(string_value, - get_met_format=True, - sort_list=sort_list) + get_met_format=True, + sort_list=sort_list) @staticmethod def _get_delta_list(string_value, sort_list=True): return StatAnalysisWrapper._format_time_list(string_value, - get_met_format=False, - sort_list=sort_list) + get_met_format=False, + sort_list=sort_list) def set_lists_loop_or_group(self, c_dict): """! Determine whether the lists from the METplus config file @@ -524,7 +524,6 @@ def build_stringsub_dict(self, config_dict): ) elif 'HOUR' in list_name: - # TODO: should this only handle opposite of date_type? 
delta_list = self._get_delta_list(config_dict[list_name]) if not delta_list: stringsub_dict[sub_name] = list_name_value @@ -632,8 +631,6 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): @param fcst_hour_str string with list of forecast hours to process @param obs_hour_str string with list of observation hours to process """ - # date_type is valid or init depending on LOOP_BY - date_type = self.c_dict['DATE_TYPE'].lower() if fcst_hour_str: fcst_hour_list = self._get_delta_list(fcst_hour_str) else: @@ -648,7 +645,7 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): self._set_stringsub_hours_item(sub_dict, 'obs', obs_hour_list) self._set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, - date_type) + self.c_dict['DATE_TYPE'].lower()) def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): """! Set either fcst or obs values in string sub dictionary, e.g. From a4bd994afbf67fdab536ad4c64481cbe81637aa9 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 12:27:19 -0600 Subject: [PATCH 43/92] improved documentation for function and more clean up --- metplus/wrappers/stat_analysis_wrapper.py | 37 +++++++++-------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index e8a785ac3b..1cd0ac0997 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -792,27 +792,20 @@ def _set_strinsub_other_item(sub_dict, date_type, fcst_or_obs, hour_list): ) def get_output_filename(self, output_type, filename_template, - filename_type, - lists_to_loop,config_dict): + filename_type, lists_to_loop, config_dict): """! Create a file name for stat_analysis output. 
- - Args: - output_type - string for the type of - stat_analysis output, either - dump_row or out_stat - filename_template - string of the template to be used - to create the file name - filename_type - string of the source of the - template being used, either - default or user - lists_to_loop - list of all the list names whose - items are being grouped together - config_dict - dictionary containing the - configuration information - Returns: - output_filename - string of the filled file name - template + @param output_type string for the type of stat_analysis output, either + dump_row, out_stat, or output. Only used if filename_type is default. + @param filename_template string of the template to create the file + name. Info from the loop list items are appended to the template if + filename_type is default. + @param filename_type string of the source of the template being used, + either default or user. + @param lists_to_loop list of all the list names whose items are being + grouped together + @param config_dict dictionary containing the configuration information + @returns string of the filled file name template """ date_beg = self.c_dict['DATE_BEG'] date_end = self.c_dict['DATE_END'] @@ -1065,7 +1058,7 @@ def parse_model_info(self): model_reference_name = self.config.getstr('config', f'MODEL{m}_REFERENCE_NAME', model_name) - model_dir = self.config.getraw('dir', + model_dir = self.config.getraw('config', f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR') if not model_dir: self.log_error(f"MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR must be set " @@ -1081,7 +1074,7 @@ def parse_model_info(self): for output_type in ['DUMP_ROW', 'OUT_STAT']: # if MODEL_STAT_ANALYSIS__TEMPLATE is set, use that model_filename_template = ( - self.config.getraw('filename_templates', + self.config.getraw('config', 'MODEL'+m+'_STAT_ANALYSIS_' +output_type+'_TEMPLATE') ) @@ -1089,7 +1082,7 @@ def parse_model_info(self): # if not set, use STAT_ANALYSIS__TEMPLATE if not model_filename_template: 
model_filename_template = ( - self.config.getraw('filename_templates', + self.config.getraw('config', 'STAT_ANALYSIS_' + output_type + '_TEMPLATE') ) From 5c1de4a9525898e7a568b4d23dc5ebd640a73ed6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 13:54:35 -0600 Subject: [PATCH 44/92] per #1862, add support for using filename template tags anywhere in the JOB args --- metplus/wrappers/stat_analysis_wrapper.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 1cd0ac0997..26100aaaf6 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1072,19 +1072,16 @@ def parse_model_info(self): return None, None for output_type in ['DUMP_ROW', 'OUT_STAT']: + var_name = f'STAT_ANALYSIS_{output_type}_TEMPLATE' # if MODEL_STAT_ANALYSIS__TEMPLATE is set, use that model_filename_template = ( - self.config.getraw('config', - 'MODEL'+m+'_STAT_ANALYSIS_' - +output_type+'_TEMPLATE') + self.config.getraw('config', f'MODEL{m}_{var_name}') ) # if not set, use STAT_ANALYSIS__TEMPLATE if not model_filename_template: model_filename_template = ( - self.config.getraw('config', - 'STAT_ANALYSIS_' - + output_type + '_TEMPLATE') + self.config.getraw('config', var_name) ) if not model_filename_template: @@ -1456,6 +1453,9 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists): @params runtime_settings_dict dictionary containing all settings used in next run @returns string containing job information to pass to StatAnalysis config file """ + # get values to substitute filename template tags + stringsub_dict = self.build_stringsub_dict(runtime_settings_dict) + jobs = [] for job in self.c_dict['JOBS']: for job_type in ['dump_row', 'out_stat']: @@ -1467,6 +1467,9 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists): 
runtime_settings_dict, ) + # substitute filename templates that may be found in rest of job + job = do_string_sub(job, **stringsub_dict) + jobs.append(job) return jobs From c4b1bc8e67b2105aee6ff509600e5c4d36a8ad5d Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 15:37:02 -0600 Subject: [PATCH 45/92] refactor tests to be more easily expandable --- .../stat_analysis/test_stat_analysis.py | 146 +++++++----------- 1 file changed, 58 insertions(+), 88 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 6a299bf665..be597ff05a 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -622,106 +622,76 @@ def test_get_lookin_dir(metplus_config): assert expected_lookin_dir == test_lookin_dir +@pytest.mark.parametrize( + 'c_dict_overrides, config_dict_overrides, expected_values', [ + # Test 0 + ({'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 12', + 'OBS_VALID_HOUR': '', 'OBS_INIT_HOUR': ''}, + {'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190105_000000', + 'FCST_VALID_HOUR': '"000000"', + 'FCST_INIT_HOUR': '"000000", "120000"', + }, + ), + # Test 1 + ( + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0, 12', 'FCST_INIT_HOUR': '0, 12', + 'OBS_VALID_HOUR': '', 'OBS_INIT_HOUR': ''}, + {'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190105_120000', + 'FCST_VALID_HOUR': '"000000", "120000"', + 'FCST_INIT_HOUR': '"000000", "120000"', + }, + ), + # Test 2 + ( + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '', 'FCST_INIT_HOUR': '', + 'OBS_VALID_HOUR': '000000', 'OBS_INIT_HOUR': '0, 12'}, + {'OBS_VALID_BEG': 
'20190101_000000', + 'OBS_VALID_END': '20190101_000000', + 'OBS_VALID_HOUR': '"000000"', + 'OBS_INIT_HOUR': '"000000", "120000"', + }, + ), + # Test 3 + ({'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '', 'FCST_INIT_HOUR': '', + 'OBS_VALID_HOUR': '000000', 'OBS_INIT_HOUR': '0, 12'}, + {'OBS_INIT_BEG': '20190101_000000', + 'OBS_INIT_END': '20190101_120000', + 'OBS_VALID_HOUR': '"000000"', + 'OBS_INIT_HOUR': '"000000", "120000"', + }, + ), + ] +) @pytest.mark.wrapper_d -def test_format_valid_init(metplus_config): +def test_format_valid_init(metplus_config, c_dict_overrides, + config_dict_overrides, expected_values): # Independently test the formatting # of the valid and initialization date and hours # from the METplus config file for the MET # config file and that they are formatted # correctly st = stat_analysis_wrapper(metplus_config) - # Test 1 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' - - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '0' - config_dict['FCST_INIT_HOUR'] = '0, 12' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '20190101_000000' - assert config_dict['FCST_VALID_END'] == '20190105_000000' - assert config_dict['FCST_VALID_HOUR'] == '"000000"' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '' - # Test 2 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' - config_dict = {} - config_dict['FCST_VALID_HOUR'] = 
'0, 12' - config_dict['FCST_INIT_HOUR'] = '0, 12' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '20190101_000000' - assert config_dict['FCST_VALID_END'] == '20190105_120000' - assert config_dict['FCST_VALID_HOUR'] == '"000000", "120000"' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '' - # Test 3 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' + for key, value in c_dict_overrides.items(): + st.c_dict[key] = value config_dict = {} - config_dict['FCST_VALID_HOUR'] = '' - config_dict['FCST_INIT_HOUR'] = '' - config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '0, 12' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '' - assert config_dict['FCST_VALID_END'] == '' - assert config_dict['FCST_VALID_HOUR'] == '' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '' - assert config_dict['OBS_VALID_BEG'] == '20190101_000000' - assert config_dict['OBS_VALID_END'] == '20190101_000000' - assert config_dict['OBS_VALID_HOUR'] == '"000000"' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' - # Test 3 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'INIT' + for key, value in config_dict_overrides.items(): + config_dict[key] = value - config_dict = {} - 
config_dict['FCST_VALID_HOUR'] = '' - config_dict['FCST_INIT_HOUR'] = '' - config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '0, 12' config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '' - assert config_dict['FCST_VALID_END'] == '' - assert config_dict['FCST_VALID_HOUR'] == '' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '"000000"' - assert config_dict['OBS_INIT_BEG'] == '20190101_000000' - assert config_dict['OBS_INIT_END'] == '20190101_120000' - assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' + print(config_dict) + for key, value in config_dict.items(): + if key not in expected_values: + assert value == '' + else: + assert value == expected_values[key] @pytest.mark.wrapper_d From 12a2d9a1b831f830069f6705903dca2eec039f97 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 15:37:34 -0600 Subject: [PATCH 46/92] cleaned up logic to be more maintainable, ci-run-diff --- metplus/wrappers/stat_analysis_wrapper.py | 186 +++++----------------- 1 file changed, 43 insertions(+), 143 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 26100aaaf6..5e681966a4 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -888,156 +888,57 @@ def format_valid_init(self, config_dict): """! Format the valid and initialization dates and hours for the MET stat_analysis config file. 
- Args: - config_dict - dictionary containing the - configuration information - - Returns: - config_dict - dictionary containing the - edited configuration information - for valid and initialization dates - and hours + @param config_dict dictionary containing the configuration information + @returns dictionary containing the edited configuration information + for valid and initialization dates and hours """ - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] - date_type = self.c_dict['DATE_TYPE'] - for list_name in self.FORMAT_LISTS: list_name = list_name.replace('_LIST', '') values = self._get_met_time_list(config_dict.get(list_name, '')) values = [f'"{item}"' for item in values] config_dict[list_name] = ', '.join(values) - fcst_valid_hour_list = config_dict['FCST_VALID_HOUR'].split(', ') - fcst_init_hour_list = config_dict['FCST_INIT_HOUR'].split(', ') - obs_valid_hour_list = config_dict['OBS_VALID_HOUR'].split(', ') - obs_init_hour_list = config_dict['OBS_INIT_HOUR'].split(', ') - nfcst_valid_hour = len(fcst_valid_hour_list) - nfcst_init_hour = len(fcst_init_hour_list) - nobs_valid_hour = len(obs_valid_hour_list) - nobs_init_hour = len(obs_init_hour_list) - if nfcst_valid_hour > 1: - if date_type == 'VALID': - fcst_valid_hour_beg = fcst_valid_hour_list[0].replace('"','') - fcst_valid_hour_end = fcst_valid_hour_list[-1].replace('"','') - config_dict['FCST_VALID_BEG'] = ( - str(date_beg)+'_'+fcst_valid_hour_beg - ) - config_dict['FCST_VALID_END'] = ( - str(date_end)+'_'+fcst_valid_hour_end - ) - elif date_type == 'INIT': - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - elif nfcst_valid_hour == 1 and fcst_valid_hour_list != ['']: - fcst_valid_hour_now = fcst_valid_hour_list[0].replace('"','') - config_dict['FCST_VALID_HOUR'] = '"'+fcst_valid_hour_now+'"' - if date_type == 'VALID': - config_dict['FCST_VALID_BEG'] = ( - str(date_beg)+'_'+fcst_valid_hour_now - ) - config_dict['FCST_VALID_END'] = ( - 
str(date_end)+'_'+fcst_valid_hour_now - ) - elif date_type == 'INIT': - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - else: - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - config_dict['FCST_VALID_HOUR'] = '' - if nfcst_init_hour > 1: - if date_type == 'VALID': - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - elif date_type == 'INIT': - fcst_init_hour_beg = fcst_init_hour_list[0].replace('"','') - fcst_init_hour_end = fcst_init_hour_list[-1].replace('"','') - config_dict['FCST_INIT_BEG'] = ( - str(date_beg)+'_'+fcst_init_hour_beg - ) - config_dict['FCST_INIT_END'] = ( - str(date_end)+'_'+fcst_init_hour_end - ) - elif nfcst_init_hour == 1 and fcst_init_hour_list != ['']: - fcst_init_hour_now = fcst_init_hour_list[0].replace('"','') - config_dict['FCST_INIT_HOUR'] = '"'+fcst_init_hour_now+'"' - if date_type == 'VALID': - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - elif date_type == 'INIT': - config_dict['FCST_INIT_BEG'] = ( - str(date_beg)+'_'+fcst_init_hour_now - ) - config_dict['FCST_INIT_END'] = ( - str(date_end)+'_'+fcst_init_hour_now - ) - else: - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - config_dict['FCST_INIT_HOUR'] = '' - if nobs_valid_hour > 1: - if date_type == 'VALID': - obs_valid_hour_beg = obs_valid_hour_list[0].replace('"','') - obs_valid_hour_end = obs_valid_hour_list[-1].replace('"','') - config_dict['OBS_VALID_BEG'] = ( - str(date_beg)+'_'+obs_valid_hour_beg - ) - config_dict['OBS_VALID_END'] = ( - str(date_end)+'_'+obs_valid_hour_end - ) - elif date_type == 'INIT': - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - elif nobs_valid_hour == 1 and obs_valid_hour_list != ['']: - obs_valid_hour_now = obs_valid_hour_list[0].replace('"','') - config_dict['OBS_VALID_HOUR'] = '"'+obs_valid_hour_now+'"' - if date_type == 'VALID': - config_dict['OBS_VALID_BEG'] = ( - 
str(date_beg)+'_'+obs_valid_hour_now - ) - config_dict['OBS_VALID_END'] = ( - str(date_end)+'_'+obs_valid_hour_now - ) - elif date_type == 'INIT': - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - else: - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - config_dict['OBS_VALID_HOUR'] = '' - if nobs_init_hour > 1: - if date_type == 'VALID': - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - elif date_type == 'INIT': - obs_init_hour_beg = obs_init_hour_list[0].replace('"','') - obs_init_hour_end = obs_init_hour_list[-1].replace('"','') - config_dict['OBS_INIT_BEG'] = ( - str(date_beg)+'_'+obs_init_hour_beg - ) - config_dict['OBS_INIT_END'] = ( - str(date_end)+'_'+obs_init_hour_end - ) - elif nobs_init_hour == 1 and obs_init_hour_list != ['']: - obs_init_hour_now = obs_init_hour_list[0].replace('"','') - config_dict['OBS_INIT_HOUR'] = '"'+obs_init_hour_now+'"' - if date_type == 'VALID': - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - elif date_type == 'INIT': - config_dict['OBS_INIT_BEG'] = ( - str(date_beg)+'_'+obs_init_hour_now - ) - config_dict['OBS_INIT_END'] = ( - str(date_end)+'_'+obs_init_hour_now - ) - else: - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - config_dict['OBS_INIT_HOUR'] = '' + for fcst_or_obs in ['FCST', 'OBS']: + for init_or_valid in ['INIT', 'VALID']: + self._format_valid_init_item(config_dict, + fcst_or_obs, + init_or_valid, + self.c_dict['DATE_TYPE']) + return config_dict + def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, + date_type): + date_beg = self.c_dict['DATE_BEG'] + date_end = self.c_dict['DATE_END'] + + prefix = f'{fcst_or_obs}_{init_or_valid}' + hour_list = config_dict[f'{prefix}_HOUR'].split(', ') + + # if hour list is not set + if not hour_list or (len(hour_list) == 1 and hour_list == ['']): + #config_dict[f'{prefix}_BEG'] = '' + #config_dict[f'{prefix}_END'] = '' + 
#config_dict[f'{prefix}_HOUR'] = '' + return + + # if multiple hours are specified + if len(hour_list) > 1: + if date_type == init_or_valid: + hour_beg = hour_list[0].replace('"', '') + hour_end = hour_list[-1].replace('"', '') + config_dict[f'{prefix}_BEG'] = str(date_beg)+'_'+hour_beg + config_dict[f'{prefix}_END'] = str(date_end)+'_'+hour_end + + return + + # if 1 hour specified + hour_now = hour_list[0].replace('"', '') + config_dict[f'{prefix}_HOUR'] = '"'+hour_now+'"' + if date_type == init_or_valid: + config_dict[f'{prefix}_BEG'] = str(date_beg)+'_'+hour_now + config_dict[f'{prefix}_END'] = str(date_end)+'_'+hour_now + def parse_model_info(self): """! Parse for model information. @@ -1550,8 +1451,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): for mp_item in mp_items: if not runtime_settings_dict.get(mp_item, ''): continue - value = remove_quotes(runtime_settings_dict.get(mp_item, - '')) + value = remove_quotes(runtime_settings_dict.get(mp_item, '')) value = (f"{mp_item.lower()} = \"{value}\";") self.env_var_dict[f'METPLUS_{mp_item}'] = value From 64ad315b8a4e0e906167d01a48088545848d1036 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 11 Oct 2022 15:48:35 -0600 Subject: [PATCH 47/92] set INIT/VALID_BEG/END even if no corresponding HOUR list was specified to ensure that the files processed are restricted to the time range specified in the config file with INIT/VALID_BEG/END. 
ci-run-diff to see if this changes any of the use case output --- .../pytests/wrappers/stat_analysis/test_stat_analysis.py | 9 +++++++++ metplus/wrappers/stat_analysis_wrapper.py | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index be597ff05a..df4851b029 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -632,6 +632,8 @@ def test_get_lookin_dir(metplus_config): 'FCST_VALID_END': '20190105_000000', 'FCST_VALID_HOUR': '"000000"', 'FCST_INIT_HOUR': '"000000", "120000"', + 'OBS_VALID_BEG': '20190101_000000', + 'OBS_VALID_END': '20190105_235959', }, ), # Test 1 @@ -643,6 +645,8 @@ def test_get_lookin_dir(metplus_config): 'FCST_VALID_END': '20190105_120000', 'FCST_VALID_HOUR': '"000000", "120000"', 'FCST_INIT_HOUR': '"000000", "120000"', + 'OBS_VALID_BEG': '20190101_000000', + 'OBS_VALID_END': '20190105_235959', }, ), # Test 2 @@ -654,6 +658,8 @@ def test_get_lookin_dir(metplus_config): 'OBS_VALID_END': '20190101_000000', 'OBS_VALID_HOUR': '"000000"', 'OBS_INIT_HOUR': '"000000", "120000"', + 'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190101_235959', }, ), # Test 3 @@ -664,6 +670,8 @@ def test_get_lookin_dir(metplus_config): 'OBS_INIT_END': '20190101_120000', 'OBS_VALID_HOUR': '"000000"', 'OBS_INIT_HOUR': '"000000", "120000"', + 'FCST_INIT_BEG': '20190101_000000', + 'FCST_INIT_END': '20190101_235959', }, ), ] @@ -688,6 +696,7 @@ def test_format_valid_init(metplus_config, c_dict_overrides, config_dict = st.format_valid_init(config_dict) print(config_dict) for key, value in config_dict.items(): + print(key) if key not in expected_values: assert value == '' else: diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 5e681966a4..9f4ec74ee5 
100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -917,9 +917,10 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, # if hour list is not set if not hour_list or (len(hour_list) == 1 and hour_list == ['']): - #config_dict[f'{prefix}_BEG'] = '' - #config_dict[f'{prefix}_END'] = '' - #config_dict[f'{prefix}_HOUR'] = '' + if date_type == init_or_valid: + config_dict[f'{prefix}_BEG'] = f'{date_beg}_000000' + config_dict[f'{prefix}_END'] = f'{date_end}_235959' + return # if multiple hours are specified From ee0f21c5115e2800bc3239c655f9aceaf96e9d4a Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 12 Oct 2022 14:47:17 -0600 Subject: [PATCH 48/92] updated use case to read from a different point_stat stat file that has matching times to produce output when restricting the obs valid range to the range specified in the config file --- .../StatAnalysis_python_embedding.conf | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf index 72cfdee349..43ca0cc64e 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf @@ -25,14 +25,11 @@ PROCESS_LIST = StatAnalysis # https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control ### LOOP_BY = VALID -VALID_TIME_FMT = %Y%m%d%H -VALID_BEG=2005080700 -VALID_END=2005080700 +VALID_TIME_FMT = %Y%m%d +VALID_BEG=20070331 +VALID_END=20070331 VALID_INCREMENT = 12H -LEAD_SEQ = 12 - -LOOP_ORDER = times ### # File I/O @@ -40,9 +37,9 @@ LOOP_ORDER = times ### MODEL1 = WRF -MODEL1_OBTYPE = ADPSFC -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = python 
{INPUT_BASE}/met_test/scripts/python/read_ascii_mpr.py {INPUT_BASE}/met_test/new/point_stat_120000L_20050807_120000V.stat -MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE = stat_analysis_python_AGGR_MPR_to_SL1L2.stat +MODEL1_OBTYPE = ADPUPA +MODEL1_STAT_ANALYSIS_LOOKIN_DIR = python {INPUT_BASE}/met_test/scripts/python/read_ascii_mpr.py {INPUT_BASE}/met_test/out/point_stat/point_stat_360000L_20070331_120000V.stat + MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE = {model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}_fcstvalidhour{valid_hour?fmt=%H}0000Z_out_stat.stat STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/met_tool_wrapper/StatAnalysis_python_embedding From e50acddbe9dd2e0ae95c53c01569348ff13bece5 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 12 Oct 2022 16:05:29 -0600 Subject: [PATCH 49/92] clean up code, rearrange functions in order they are used, updated comments, fixed pylint complaints, replaced object variables not set in init, etc. --- .../stat_analysis/test_stat_analysis.py | 4 +- metplus/wrappers/stat_analysis_wrapper.py | 519 +++++++++--------- 2 files changed, 252 insertions(+), 271 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index df4851b029..0c1923ebdd 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -199,7 +199,7 @@ def test_get_command(metplus_config): +'-lookin /path/to/lookin_dir ' +'-config /path/to/STATAnalysisConfig' ) - st.lookindir = '/path/to/lookin_dir' + st.c_dict['LOOKIN_DIR'] = '/path/to/lookin_dir' st.c_dict['CONFIG_FILE'] = '/path/to/STATAnalysisConfig' test_command = st.get_command() assert expected_command == test_command @@ -776,7 +776,7 @@ def test_get_level_list(metplus_config, data_type, config_list, expected_list): saw = StatAnalysisWrapper(config) - assert 
saw.get_level_list(data_type) == expected_list + assert saw._get_level_list(data_type) == expected_list @pytest.mark.wrapper_d diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 9f4ec74ee5..a6051c4c75 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -10,10 +10,7 @@ Condition codes: 0 for success, 1 for failure ''' -import logging import os -import copy -import re import glob import datetime import itertools @@ -29,8 +26,8 @@ from . import CommandBuilder class StatAnalysisWrapper(CommandBuilder): - """! Wrapper to the MET tool stat_analysis which is used to filter - and summarize data from MET's point_stat, grid_stat, + """! Wrapper to the MET tool stat_analysis which is used to filter + and summarize data from MET's point_stat, grid_stat, ensemble_stat, and wavelet_stat """ @@ -135,16 +132,12 @@ def get_command(self): if self.args: cmd += ' ' + ' '.join(self.args) - if not self.lookindir: - self.log_error("No lookin directory specified") - return None - - cmd += ' -lookin ' + self.lookindir + cmd += ' -lookin ' + self.c_dict['LOOKIN_DIR'] if self.c_dict.get('CONFIG_FILE'): cmd += f" -config {self.c_dict['CONFIG_FILE']}" else: - cmd += f' {self.job_args}' + cmd += f' {self.c_dict["JOB_ARGS"]}' if self.c_dict.get('OUTPUT_FILENAME'): cmd += f" -out {self.c_dict['OUTPUT_FILENAME']}" @@ -153,14 +146,11 @@ def get_command(self): def create_c_dict(self): """! Create a data structure (dictionary) that contains all the - values set in the configuration files that are common for + values set in the configuration files that are common for stat_analysis_wrapper.py. 
- - Args: - - Returns: - c_dict - a dictionary containing the settings in the - configuration files unique to the wrapper + + @returns dictionary containing the settings in the configuration files + unique to the wrapper """ c_dict = super().create_c_dict() c_dict['VERBOSITY'] = ( @@ -222,6 +212,17 @@ def create_c_dict(self): return self.c_dict_error_check(c_dict, all_field_lists_empty) + def run_all_times(self): + self.run_stat_analysis() + return self.all_commands + + def run_at_time(self, input_dict): + loop_by = self.c_dict['DATE_TYPE'] + run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') + self.c_dict['DATE_BEG'] = run_date + self.c_dict['DATE_END'] = run_date + self.run_stat_analysis() + def _read_jobs_from_config(self): jobs = [] job_indices = list( @@ -300,7 +301,7 @@ def read_lists_from_config(self, c_dict): for conf_list in all_lists_to_read: if 'LEVEL_LIST' in conf_list: c_dict[conf_list] = ( - self.get_level_list(conf_list.split('_')[0]) + self._get_level_list(conf_list.split('_')[0]) ) else: c_dict[conf_list] = self._format_conf_list(conf_list) @@ -311,6 +312,28 @@ def read_lists_from_config(self, c_dict): return all_empty + def _get_level_list(self, data_type): + """!Read forecast or observation level list from config. 
+ Format list items to match the format expected by + StatAnalysis by removing parenthesis and any quotes, + then adding back single quotes + Args: + @param data_type type of list to get, FCST or OBS + @returns list containing the formatted level list + """ + level_list = [] + + level_input = getlist( + self.config.getraw('config', f'{data_type}_LEVEL_LIST', '') + ) + + for level in level_input: + level = level.strip('(').strip(')') + level = f'{remove_quotes(level)}' + level_list.append(level) + + return [f'"{item}"' for item in level_list] + def _format_conf_list(self, conf_list): items = getlist( self.config.getraw('config', conf_list, '') @@ -355,17 +378,16 @@ def _format_conf_list(self, conf_list): @staticmethod def list_to_str(list_of_values, add_quotes=True): - """! Turn a list of values into a single string so it can be - set to an environment variable and read by the MET + """! Turn a list of values into a single string so it can be + set to an environment variable and read by the MET stat_analysis config file. - - Args: - @param list_of_values - list of values, i.e. ['value1', 'value2'] - @param add_quotes if True, add quotation marks around values - default is True - - @returns string created from list_of_values with the values separated - by commas, i.e. '"value1", "value2"' or 1, 3 if add_quotes is False + + @param list_of_values list of values, i.e. ['value1', 'value2'] + @param add_quotes if True, add quotation marks around values, + default is True + + @returns string created from list_of_values with the values separated + by commas, i.e. '"value1", "value2"' or 1, 3 if add_quotes is False """ # return empty string if list is empty if not list_of_values: @@ -413,7 +435,7 @@ def _get_delta_list(string_value, sort_list=True): def set_lists_loop_or_group(self, c_dict): """! 
Determine whether the lists from the METplus config file - should treat the items in that list as a group or items + should treat the items in that list as a group or items to be looped over based on user settings, the values in the list, and process being run. @@ -478,7 +500,7 @@ def format_thresh(thresh_str): def build_stringsub_dict(self, config_dict): """! Build a dictionary with list names, dates, and commonly used identifiers to pass to string_template_substitution. - + @param lists_to_loop list of all the list names whose items are being grouped together @param lists_to_group list of all the list names whose items @@ -517,13 +539,7 @@ def build_stringsub_dict(self, config_dict): .replace(',', '_').replace('*', 'ALL') ) - if list_name == 'MODEL': - stringsub_dict[sub_name] = list_name_value - stringsub_dict['obtype'] = ( - config_dict['OBTYPE'].replace('"', '').replace(' ', '') - ) - - elif 'HOUR' in list_name: + if 'HOUR' in list_name: delta_list = self._get_delta_list(config_dict[list_name]) if not delta_list: stringsub_dict[sub_name] = list_name_value @@ -543,12 +559,7 @@ def build_stringsub_dict(self, config_dict): stringsub_dict[sub_name + '_beg'] = delta_list[0] stringsub_dict[sub_name + '_end'] = delta_list[-1] - if 'FCST' in list_name: - check_list = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list = config_dict[list_name.replace('OBS', - 'FCST')] + check_list = self._get_check_list(list_name, config_dict) # if opposite fcst is not set or the same, # set init/valid hour beg/end to fcst, same for obs if not check_list or config_dict[list_name] == check_list: @@ -578,9 +589,7 @@ def build_stringsub_dict(self, config_dict): # if multiple leads are specified, format lead info # using met time notation separated by underscore if len(lead_list) > 1: - stringsub_dict[sub_name] = ( - '_'.join(lead_list) - ) + stringsub_dict[sub_name] = '_'.join(lead_list) continue stringsub_dict[sub_name] = lead_list[0] @@ -593,10 +602,7 
@@ def build_stringsub_dict(self, config_dict): stringsub_dict[f'{sub_name}_min'] = lead_list[0][-4:-2] stringsub_dict[f'{sub_name}_sec'] = lead_list[0][-2:] - if 'FCST' in list_name: - check_list = config_dict[list_name.replace('FCST', 'OBS')] - elif 'OBS' in list_name: - check_list = config_dict[list_name.replace('OBS', 'FCST')] + check_list = self._get_check_list(list_name, config_dict) if not check_list or config_dict[list_name] == check_list: stringsub_dict['lead'] = stringsub_dict[sub_name] stringsub_dict['lead_hour'] = ( @@ -614,11 +620,33 @@ def build_stringsub_dict(self, config_dict): else: stringsub_dict[sub_name] = list_name_value + # if list is MODEL, also set obtype + if list_name == 'MODEL': + stringsub_dict['obtype'] = ( + config_dict['OBTYPE'].replace('"', '').replace(' ', '') + ) + # Some lines for debugging if needed in future - #for key, value in stringsub_dict.items(): + # for key, value in stringsub_dict.items(): # self.logger.debug("{} ({})".format(key, value)) return stringsub_dict + @staticmethod + def _get_check_list(list_name, config_dict): + """! Helper function for getting opposite list from config dict. + + @param list_name either FCST or OBS + @param config_dict dictionary to query + @returns equivalent OBS item if list_name is FCST, + equivalent FCST item if list_name is OBS, or + None if list_name is not FCST or OBS + """ + if 'FCST' in list_name: + return config_dict[list_name.replace('FCST', 'OBS')] + if 'OBS' in list_name: + return config_dict[list_name.replace('OBS', 'FCST')] + return None + def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): """! Set string sub dictionary _beg and _end values for fcst and obs hour lists. 
@@ -813,6 +841,7 @@ def get_output_filename(self, output_type, filename_template, stringsub_dict = self.build_stringsub_dict(config_dict) + # if template was not specified by user, build template from values if filename_type == 'default': if date_beg == date_end: @@ -825,47 +854,41 @@ def get_output_filename(self, output_type, filename_template, date_beg+'to'+date_end ) for loop_list in lists_to_loop: - if loop_list != 'MODEL_LIST': - list_name = loop_list.replace('_LIST', '') - if 'HOUR' in list_name: - value = self._get_met_time_list(config_dict[list_name])[0] - filename_template = ( - filename_template+'_' - +list_name.replace('_', '').lower() - +value+'Z' - ) - else: - filename_template = ( - filename_template+'_' - +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '') - ) + # don't format MODEL because it is already in default template + if loop_list == 'MODEL_LIST': + continue + + list_name = loop_list.replace('_LIST', '') + filename_template += ( + f"_{list_name.replace('_', '').lower()}" + ) + # if add value without formatting if not HOUR variable + if 'HOUR' not in list_name: + filename_template += ( + config_dict[list_name].replace('"', '') + ) + continue + + # get first hour in MET time fmt (HHMMSS) and add with Z + value = self._get_met_time_list(config_dict[list_name])[0] + filename_template += value+'Z' + filename_template += '_' + output_type + '.stat' - self.logger.debug("Building "+output_type+" filename from " - +filename_type+" template: "+filename_template) + self.logger.debug(f"Building {output_type} filename from " + f"{filename_type} template: {filename_template}") output_filename = do_string_sub(filename_template, **stringsub_dict) return output_filename def get_lookin_dir(self, dir_path, config_dict): - """!Fill in necessary information to get the path to - the lookin directory to pass to stat_analysis. 
- - Args: - dir_path - string of the user provided - directory path - lists_to_loop - list of all the list names whose - items are being grouped together - lists_to group - list of all the list names whose - items are being looped over - config_dict - dictionary containing the - configuration information - - Returns: - lookin_dir - string of the filled directory - from dir_path + """!Fill in necessary information to get the path to the lookin + directory to pass to stat_analysis. Expand any wildcards. + + @param dir_path string of the user provided directory path + @param config_dict dictionary containing the configuration information + @returns string of the filled directory from dir_path """ stringsub_dict = self.build_stringsub_dict(config_dict) dir_path_filled = do_string_sub(dir_path, @@ -873,15 +896,17 @@ def get_lookin_dir(self, dir_path, config_dict): all_paths = [] for one_path in dir_path_filled.split(','): - if '*' in one_path: - self.logger.debug(f"Expanding wildcard path: {one_path}") - expand_path = glob.glob(one_path.strip()) - if not expand_path: - self.logger.warning(f"Wildcard expansion found no matches") - continue - all_paths.extend(sorted(expand_path)) - else: - all_paths.append(one_path.strip()) + if '*' not in one_path: + all_paths.append(one_path.strip()) + continue + + self.logger.debug(f"Expanding wildcard path: {one_path}") + expand_path = glob.glob(one_path.strip()) + if not expand_path: + self.logger.warning(f"Wildcard expansion found no matches") + continue + all_paths.extend(sorted(expand_path)) + return ' '.join(all_paths) def format_valid_init(self, config_dict): @@ -942,12 +967,8 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, def parse_model_info(self): """! Parse for model information. 
- - Args: - - Returns: - model_list - list of dictionaries containing - model information + + @returns list of dictionaries containing model information """ model_info_list = [] model_indices = list( @@ -956,15 +977,18 @@ def parse_model_info(self): index_index=1).keys() ) for m in model_indices: - model_name = self.config.getstr('config', f'MODEL{m}') - model_reference_name = self.config.getstr('config', - f'MODEL{m}_REFERENCE_NAME', - model_name) - model_dir = self.config.getraw('config', - f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR') + model_name = self.config.getraw('config', f'MODEL{m}') + model_reference_name = ( + self.config.getraw('config', f'MODEL{m}_REFERENCE_NAME', + model_name) + ) + model_dir = ( + self.config.getraw('config', + f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR') + ) if not model_dir: - self.log_error(f"MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR must be set " - f"if MODEL{m} is set.") + self.log_error(f"MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR must be " + f"set if MODEL{m} is set.") return None, None model_obtype = self.config.getstr('config', f'MODEL{m}_OBTYPE', '') @@ -975,7 +999,7 @@ def parse_model_info(self): for output_type in ['DUMP_ROW', 'OUT_STAT']: var_name = f'STAT_ANALYSIS_{output_type}_TEMPLATE' - # if MODEL_STAT_ANALYSIS__TEMPLATE is set, use that + # use MODEL_STAT_ANALYSIS__TEMPLATE if set model_filename_template = ( self.config.getraw('config', f'MODEL{m}_{var_name}') ) @@ -987,61 +1011,36 @@ def parse_model_info(self): ) if not model_filename_template: - model_filename_template = '{model?fmt=%s}_{obtype?fmt=%s}_' - model_filename_type = 'default' + model_filename_template = '{model?fmt=%s}_{obtype?fmt=%s}_' + model_filename_type = 'default' else: - model_filename_type = 'user' + model_filename_type = 'user' if output_type == 'DUMP_ROW': - model_dump_row_filename_template = ( - model_filename_template - ) - model_dump_row_filename_type = model_filename_type + model_dump_row_filename_template = ( + model_filename_template + ) + 
model_dump_row_filename_type = model_filename_type elif output_type == 'OUT_STAT': model_out_stat_filename_template = ( model_filename_template ) model_out_stat_filename_type = model_filename_type - mod = {} - mod['name'] = model_name - mod['reference_name'] = model_reference_name - mod['dir'] = model_dir - mod['obtype'] = model_obtype - mod['dump_row_filename_template'] = ( - model_dump_row_filename_template - ) - mod['dump_row_filename_type'] = model_dump_row_filename_type - mod['out_stat_filename_template'] = ( - model_out_stat_filename_template - ) - mod['out_stat_filename_type'] = model_out_stat_filename_type + mod = { + 'name': model_name, + 'reference_name': model_reference_name, + 'dir': model_dir, + 'obtype': model_obtype, + 'dump_row_filename_template': model_dump_row_filename_template, + 'dump_row_filename_type': model_dump_row_filename_type, + 'out_stat_filename_template': model_out_stat_filename_template, + 'out_stat_filename_type': model_out_stat_filename_type, + } model_info_list.append(mod) return model_info_list - def get_level_list(self, data_type): - """!Read forecast or observation level list from config. - Format list items to match the format expected by - StatAnalysis by removing parenthesis and any quotes, - then adding back single quotes - Args: - @param data_type type of list to get, FCST or OBS - @returns list containing the formatted level list - """ - level_list = [] - - level_input = getlist( - self.config.getraw('config', f'{data_type}_LEVEL_LIST', '') - ) - - for level in level_input: - level = level.strip('(').strip(')') - level = f'{remove_quotes(level)}' - level_list.append(level) - - return [f'"{item}"' for item in level_list] - def process_job_args(self, job_type, job, model_info, lists_to_loop_items, runtime_settings_dict): @@ -1080,18 +1079,16 @@ def get_all_runtime_settings(self): # Loop over run settings. 
formatted_runtime_settings_dict_list = [] - for runtime_settings_dict in runtime_settings_dict_list: - loop_lists = c_dict['LOOP_LIST_ITEMS'] - + for runtime_settings in runtime_settings_dict_list: # Set up stat_analysis -lookin argument, model and obs information # and stat_analysis job. - model_info = self.get_model_obtype_and_lookindir(runtime_settings_dict) + model_info = self.get_model_obtype_and_lookindir(runtime_settings) if model_info is None: return None - runtime_settings_dict['JOBS'] = ( - self.get_job_info(model_info, runtime_settings_dict, - loop_lists) + runtime_settings['JOBS'] = ( + self.get_job_info(model_info, runtime_settings, + self.c_dict['LOOP_LIST_ITEMS']) ) # get -out argument if set @@ -1100,78 +1097,21 @@ def get_all_runtime_settings(self): self.get_output_filename('output', self.c_dict['OUTPUT_TEMPLATE'], 'user', - loop_lists, - runtime_settings_dict) + self.c_dict['LOOP_LIST_ITEMS'], + runtime_settings) ) output_file = os.path.join(self.c_dict['OUTPUT_DIR'], output_filename) - # add output file path to runtime_settings_dict - runtime_settings_dict['OUTPUT_FILENAME'] = output_file + # add output file path to runtime_settings + runtime_settings['OUTPUT_FILENAME'] = output_file - # Set up forecast and observation valid - # and initialization time information. - runtime_settings_dict = ( - self.format_valid_init(runtime_settings_dict) - ) - formatted_runtime_settings_dict_list.append(runtime_settings_dict) + # Set up forecast and observation valid and init time information + runtime_settings = self.format_valid_init(runtime_settings) + formatted_runtime_settings_dict_list.append(runtime_settings) return formatted_runtime_settings_dict_list - def get_runtime_settings(self, c_dict): - """! Build list of all combinations of runtime settings that should be - run. Combine all group lists into a single item separated by comma. 
- Compute the cartesian product to get all of the different combinations - of the loop lists to create the final list of settings to run. - - @param c_dict dictionary containing [GROUP/LOOP]_LIST_ITEMS that - contain list names to group or loop, as well the actual lists which - are named the same as the values in the [GROUP/LOOP]_LIST_ITEMS but - with the _LIST extension removed. - @returns list of dictionaries that contain all of the settings to use - for a given run. - """ - runtime_setup_dict = {} - - # for group items, set the value to a list with a single item that is - # a string of all items separated by a comma - for group_list in c_dict['GROUP_LIST_ITEMS']: - key = group_list.replace('_LIST', '') - runtime_setup_dict[key] = [', '.join(c_dict[group_list])] - - # for loop items, pass the list directly as the value - for loop_list in c_dict['LOOP_LIST_ITEMS']: - key = loop_list.replace('_LIST', '') - runtime_setup_dict[key] = c_dict[loop_list] - - # Create a dict with all the combinations of settings to be run - runtime_setup_dict_names = sorted(runtime_setup_dict) - - runtime_settings_dict_list = [] - - # find cartesian product (all combos of the lists) of each dict key - products = itertools.product( - *(runtime_setup_dict[name] for name in runtime_setup_dict_names) - ) - for product in products: - # pair up product values with dict keys and add them to new dict - next_dict = {} - for key, value in zip(runtime_setup_dict_names, product): - next_dict[key] = value - runtime_settings_dict_list.append(next_dict) - - # NOTE: Logic to create list of runtime settings was previously - # handled using complex list comprehension that was difficult to - # read. New logic was intended to be more readable by other developers. 
- # Original code is commented below for reference: - # runtime_settings_dict_list = [ - # dict(zip(runtime_setup_dict_names, prod)) for prod in - # itertools.product(*(runtime_setup_dict[name] for name in - # runtime_setup_dict_names)) - # ] - - return runtime_settings_dict_list - def get_c_dict_list(self): """! Build list of config dictionaries for each field name/level/threshold specified by the [FCST/OBS]_VAR_* config vars. @@ -1256,21 +1196,72 @@ def get_c_dict_list(self): return c_dict_list + @staticmethod + def get_runtime_settings(c_dict): + """! Build list of all combinations of runtime settings that should be + run. Combine all group lists into a single item separated by comma. + Compute the cartesian product to get all of the different combinations + of the loop lists to create the final list of settings to run. + + @param c_dict dictionary containing [GROUP/LOOP]_LIST_ITEMS that + contain list names to group or loop, as well the actual lists which + are named the same as the values in the [GROUP/LOOP]_LIST_ITEMS but + with the _LIST extension removed. + @returns list of dictionaries that contain all of the settings to use + for a given run. 
+ """ + runtime_setup_dict = {} + + # for group items, set the value to a list with a single item that is + # a string of all items separated by a comma + for group_list in c_dict['GROUP_LIST_ITEMS']: + key = group_list.replace('_LIST', '') + runtime_setup_dict[key] = [', '.join(c_dict[group_list])] + + # for loop items, pass the list directly as the value + for loop_list in c_dict['LOOP_LIST_ITEMS']: + key = loop_list.replace('_LIST', '') + runtime_setup_dict[key] = c_dict[loop_list] + + # Create a dict with all the combinations of settings to be run + runtime_setup_dict_names = sorted(runtime_setup_dict) + + runtime_settings_dict_list = [] + + # find cartesian product (all combos of the lists) of each dict key + products = itertools.product( + *(runtime_setup_dict[name] for name in runtime_setup_dict_names) + ) + for product in products: + # pair up product values with dict keys and add them to new dict + next_dict = {} + for key, value in zip(runtime_setup_dict_names, product): + next_dict[key] = value + runtime_settings_dict_list.append(next_dict) + + # NOTE: Logic to create list of runtime settings was previously + # handled using complex list comprehension that was difficult to + # read. New logic was intended to be more readable by other developers. + # Original code is commented below for reference: + # runtime_settings_dict_list = [ + # dict(zip(runtime_setup_dict_names, prod)) for prod in + # itertools.product(*(runtime_setup_dict[name] for name in + # runtime_setup_dict_names)) + # ] + + return runtime_settings_dict_list + def _get_field_units(self, index): """! 
Get units of fcst and obs fields if set based on VAR index @param index VAR index corresponding to other [FCST/OBS] info @returns tuple containing forecast and observation units respectively """ - fcst_units = self.config.getstr('config', - f'FCST_VAR{index}_UNITS', - '') - obs_units = self.config.getstr('config', - f'OBS_VAR{index}_UNITS', - '') + fcst_units = self.config.getraw('config', f'FCST_VAR{index}_UNITS') + obs_units = self.config.getraw('config', f'OBS_VAR{index}_UNITS') if not obs_units and fcst_units: obs_units = fcst_units - if not fcst_units and obs_units: + elif not fcst_units and obs_units: fcst_units = obs_units return fcst_units, obs_units @@ -1351,9 +1342,10 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): def get_job_info(self, model_info, runtime_settings_dict, loop_lists): """! Get job information and concatenate values into a string - @params model_info model information to use to determine output file paths - @params runtime_settings_dict dictionary containing all settings used in next run - @returns string containing job information to pass to StatAnalysis config file + + @params model_info model info to use to determine output file paths + @params runtime_settings_dict dictionary with all settings for next run + @returns list of strings containing job info to pass config file """ # get values to substitute filename template tags stringsub_dict = self.build_stringsub_dict(runtime_settings_dict) @@ -1361,13 +1353,11 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists): jobs = [] for job in self.c_dict['JOBS']: for job_type in ['dump_row', 'out_stat']: - if f"-{job_type}" in job: - job = self.process_job_args(job_type, - job, - model_info, - loop_lists, - runtime_settings_dict, - ) + if f"-{job_type}" not in job: + continue + + job = self.process_job_args(job_type, job, model_info, + loop_lists, runtime_settings_dict) # substitute filename templates that may be found in rest of job job = 
do_string_sub(job, **stringsub_dict) @@ -1398,15 +1388,14 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): @param runtime_settings_dict_list list of dictionaries containing information needed to run a StatAnalysis job """ - for runtime_settings_dict in runtime_settings_dict_list: - if not self.create_output_directories(runtime_settings_dict): + for runtime_settings in runtime_settings_dict_list: + if not self.create_output_directories(runtime_settings): continue # Set environment variables and run stat_analysis. - for name, value in runtime_settings_dict.items(): + for name, value in runtime_settings.items(): self.add_env_var(name, value) - self.job_args = None # set METPLUS_ env vars for MET config file to be consistent # with other wrappers mp_lists = ['MODEL', @@ -1434,10 +1423,10 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): 'LINE_TYPE' ] for mp_list in mp_lists: - if not runtime_settings_dict.get(mp_list, ''): + if not runtime_settings.get(mp_list, ''): continue value = (f"{mp_list.lower()} = " - f"[{runtime_settings_dict.get(mp_list, '')}];") + f"[{runtime_settings.get(mp_list, '')}];") self.env_var_dict[f'METPLUS_{mp_list}'] = value mp_items = ['FCST_VALID_BEG', @@ -1450,14 +1439,14 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): 'OBS_INIT_END', ] for mp_item in mp_items: - if not runtime_settings_dict.get(mp_item, ''): + if not runtime_settings.get(mp_item, ''): continue - value = remove_quotes(runtime_settings_dict.get(mp_item, '')) + value = remove_quotes(runtime_settings.get(mp_item, '')) value = (f"{mp_item.lower()} = \"{value}\";") self.env_var_dict[f'METPLUS_{mp_item}'] = value value = f'jobs = ["' - value += '","'.join(runtime_settings_dict['JOBS']) + value += '","'.join(runtime_settings['JOBS']) value += '"];' self.env_var_dict[f'METPLUS_JOBS'] = value @@ -1465,13 +1454,14 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): self.set_environment_variables() # set lookin dir - 
self.logger.debug(f"Setting -lookin dir to {runtime_settings_dict['LOOKIN_DIR']}") - self.lookindir = runtime_settings_dict['LOOKIN_DIR'] - self.job_args = runtime_settings_dict['JOBS'][0] + self.logger.debug("Setting -lookin dir to " + f"{runtime_settings['LOOKIN_DIR']}") + self.c_dict['LOOKIN_DIR'] = runtime_settings['LOOKIN_DIR'] + self.c_dict['JOB_ARGS'] = runtime_settings['JOBS'][0] # set -out file path if requested, value will be set to None if not self.c_dict['OUTPUT_FILENAME'] = ( - runtime_settings_dict.get('OUTPUT_FILENAME') + runtime_settings.get('OUTPUT_FILENAME') ) self.build() @@ -1486,24 +1476,15 @@ def create_output_directories(self, runtime_settings_dict): @returns True if job should be run, False if it should be skipped """ run_job = True - for job_type in ['dump_row', 'out_stat', 'output']: + for job_type in ['DUMP_ROW', 'OUT_STAT', 'OUTPUT']: output_path = ( - runtime_settings_dict.get(f'{job_type.upper()}_FILENAME') + runtime_settings_dict.get(f'{job_type}_FILENAME') ) - if output_path: - if not self.find_and_check_output_file( - output_path_template=output_path): - run_job = False - - return run_job + if not output_path: + continue - def run_all_times(self): - self.run_stat_analysis() - return self.all_commands + if not self.find_and_check_output_file( + output_path_template=output_path): + run_job = False - def run_at_time(self, input_dict): - loop_by = self.c_dict['DATE_TYPE'] - run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') - self.c_dict['DATE_BEG'] = run_date - self.c_dict['DATE_END'] = run_date - self.run_stat_analysis() + return run_job From e7f719ab4aa14d7773ccc13200577f95c58630c4 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 12 Oct 2022 17:08:34 -0600 Subject: [PATCH 50/92] moved list_to_str function to string_manip util and moved tests to appropriate area --- .../string_manip/test_util_string_manip.py | 18 ++++++++++ .../stat_analysis/test_stat_analysis.py | 16 
--------- metplus/util/string_manip.py | 26 ++++++++++++++ metplus/wrappers/stat_analysis_wrapper.py | 36 ++++--------------- 4 files changed, 50 insertions(+), 46 deletions(-) diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index 8c9c1b694c..541733e436 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -148,3 +148,21 @@ def test_getlist_int(): @pytest.mark.util def test_getlist_begin_end_incr(list_string, output_list): assert getlist(list_string) == output_list + + +@pytest.mark.parametrize( + 'input, add_quotes, expected_output', [ + (['a', 'b', 'c'], None, '"a", "b", "c"'), + (['0', '1', '2'], None, '"0", "1", "2"'), + (['a', 'b', 'c'], True, '"a", "b", "c"'), + (['0', '1', '2'], True, '"0", "1", "2"'), + (['a', 'b', 'c'], False, 'a, b, c'), + (['0', '1', '2'], False, '0, 1, 2'), + ] +) +@pytest.mark.util +def test_list_to_str(input, add_quotes, expected_output): + if add_quotes is None: + assert list_to_str(input) == expected_output + else: + assert list_to_str(input, add_quotes=add_quotes) == expected_output diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 0c1923ebdd..681bb909f9 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -240,22 +240,6 @@ def test_create_c_dict(metplus_config): assert c_dict['LINE_TYPE_LIST'] == [] -@pytest.mark.wrapper_d -def test_list_to_str(metplus_config): - # Independently test that a list of strings - # are being converted to a one - # string list correctly - st = stat_analysis_wrapper(metplus_config) - # Test 1 - expected_list = '"a", "b", "c"' - test_list = st.list_to_str(['a', 'b', 'c']) - assert expected_list == 
test_list - # Test 2 - expected_list = '"0", "1", "2"' - test_list = st.list_to_str(['0', '1', '2']) - assert expected_list == test_list - - @pytest.mark.wrapper_d def test_set_lists_as_loop_or_group(metplus_config): # Independently test that the lists that are set diff --git a/metplus/util/string_manip.py b/metplus/util/string_manip.py index c0d1042f7f..1064070d9b 100644 --- a/metplus/util/string_manip.py +++ b/metplus/util/string_manip.py @@ -7,6 +7,7 @@ import re from csv import reader + def remove_quotes(input_string): """!Remove quotes from string""" if not input_string: @@ -15,6 +16,7 @@ def remove_quotes(input_string): # strip off double and single quotes return input_string.strip('"').strip("'") + def getlist(list_str, expand_begin_end_incr=True): """! Returns a list of string elements from a comma separated string of values. @@ -60,6 +62,7 @@ def getlist(list_str, expand_begin_end_incr=True): return item_list + def getlistint(list_str): """! Get list and convert all values to int @@ -88,6 +91,7 @@ def _handle_begin_end_incr(list_str): return list_str + def _begin_end_incr_findall(list_str): """! Find all instances of begin_end_incr in list string @@ -106,6 +110,7 @@ def _begin_end_incr_findall(list_str): list_str ) + def _begin_end_incr_evaluate(item): """! Expand begin_end_incr() items into a list of values @@ -143,6 +148,7 @@ def _begin_end_incr_evaluate(item): return None + def _fix_list(item_list): """! The logic that calls this function may have incorrectly split up a string that contains commas within quotation marks. This function @@ -182,3 +188,23 @@ def _fix_list(item_list): out_list.append(item) return out_list + + +def list_to_str(list_of_values, add_quotes=True): + """! Turn a list of values into a single string + + @param list_of_values list of values, i.e. 
['value1', 'value2'] + @param add_quotes if True, add quotation marks around values, + default is True + + @returns string created from list_of_values with the values separated + by commas, i.e. '"value1", "value2"' or 1, 3 if add_quotes is False + """ + # return empty string if list is empty + if not list_of_values: + return '' + + if add_quotes: + return '"' + '", "'.join(list_of_values) + '"' + + return ', '.join(list_of_values) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index a6051c4c75..daa577d7d1 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -19,7 +19,7 @@ from ..util import getlist from ..util import met_util as util from ..util import do_string_sub, find_indices_in_config_section -from ..util import parse_var_list, remove_quotes +from ..util import parse_var_list, remove_quotes, list_to_str from ..util import get_start_and_end_times from ..util import time_string_to_met_time, get_relativedelta from ..util import ti_get_seconds_from_relativedelta @@ -376,28 +376,6 @@ def _format_conf_list(self, conf_list): return formatted_items - @staticmethod - def list_to_str(list_of_values, add_quotes=True): - """! Turn a list of values into a single string so it can be - set to an environment variable and read by the MET - stat_analysis config file. - - @param list_of_values list of values, i.e. ['value1', 'value2'] - @param add_quotes if True, add quotation marks around values, - default is True - - @returns string created from list_of_values with the values separated - by commas, i.e. 
'"value1", "value2"' or 1, 3 if add_quotes is False - """ - # return empty string if list is empty - if not list_of_values: - return '' - - if add_quotes: - return '"' + '", "'.join(list_of_values) + '"' - - return ', '.join(list_of_values) - @staticmethod def _format_time_list(string_value, get_met_format, sort_list=True): out_list = [] @@ -1291,7 +1269,7 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): """ lookin_dirs = [] model_list = [] - reference_list = [] + ref_list = [] obtype_list = [] dump_row_filename_list = [] # get list of models to process @@ -1305,7 +1283,7 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): continue model_list.append(model_info['name']) - reference_list.append(model_info['reference_name']) + ref_list.append(model_info['reference_name']) obtype_list.append(model_info['obtype']) dump_row_filename_list.append( model_info['dump_row_filename_template'] @@ -1331,11 +1309,9 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): return None # set values in runtime settings dict for model and obtype - runtime_settings_dict['MODEL'] = self.list_to_str(model_list) - runtime_settings_dict['MODEL_REFERENCE_NAME'] = ( - self.list_to_str(reference_list) - ) - runtime_settings_dict['OBTYPE'] = self.list_to_str(obtype_list) + runtime_settings_dict['MODEL'] = list_to_str(model_list) + runtime_settings_dict['MODEL_REFERENCE_NAME'] = list_to_str(ref_list) + runtime_settings_dict['OBTYPE'] = list_to_str(obtype_list) # return last model info dict used return model_info From f6a64fbf5cbe0db799370b318eb4faf9b89aec69 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:22:14 -0600 Subject: [PATCH 51/92] Changed logic to use full [INIT/VALID]_[BEG/END] times instead of truncating YYYYMMDD. 
If only YYYYMMDD is set in the _END time and no hours are specified in [FCST/OBS]_[INIT/VALID]_HOUR, then set HHMMSS to 23:59:59 to ensure full day is processed to preserve old behavior. --- .../stat_analysis/test_stat_analysis.py | 22 ++++--- metplus/wrappers/stat_analysis_wrapper.py | 65 +++++++++++-------- 2 files changed, 52 insertions(+), 35 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 681bb909f9..e603fca6d1 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -459,7 +459,10 @@ def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, # Test 1 for key, value in c_dict_overrides.items(): - st.c_dict[key] = value + if key in ('DATE_BEG', 'DATE_END'): + st.c_dict[key] = datetime.datetime.strptime(value, '%Y%m%d') + else: + st.c_dict[key] = value for key, value in config_dict_overrides.items(): config_dict[key] = value @@ -510,8 +513,8 @@ def test_get_output_filename(metplus_config, filename_template, output_type, config_dict['FCST_VALID_HOUR'] = '0' config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20190101', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] @@ -557,8 +560,8 @@ def test_get_lookin_dir(metplus_config): config_dict['OBS_VALID_HOUR'] = '' config_dict['ALPHA'] = '' config_dict['OBS_LEVEL'] = '' - st.c_dict['DATE_BEG'] = '20180201' - st.c_dict['DATE_END'] = '20180201' + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20180201', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20180201', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' lists_to_group 
= ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', @@ -671,7 +674,10 @@ def test_format_valid_init(metplus_config, c_dict_overrides, st = stat_analysis_wrapper(metplus_config) for key, value in c_dict_overrides.items(): - st.c_dict[key] = value + if key in ('DATE_BEG', 'DATE_END'): + st.c_dict[key] = datetime.datetime.strptime(value, '%Y%m%d') + else: + st.c_dict[key] = value config_dict = {} for key, value in config_dict_overrides.items(): @@ -732,8 +738,8 @@ def test_run_stat_analysis(metplus_config): os.remove(expected_filename) comparison_filename = (METPLUS_BASE+'/internal/tests/data/stat_data/' +'test_20190101.stat') - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20190101', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' st.run_stat_analysis() assert os.path.exists(expected_filename) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index daa577d7d1..aab51aa933 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -12,7 +12,7 @@ import os import glob -import datetime +from datetime import datetime import itertools from dateutil.relativedelta import relativedelta @@ -182,8 +182,8 @@ def create_c_dict(self): self.log_error('Could not get start and end times. 
' 'VALID_BEG/END or INIT_BEG/END must be set.') else: - c_dict['DATE_BEG'] = start_dt.strftime('%Y%m%d') - c_dict['DATE_END'] = end_dt.strftime('%Y%m%d') + c_dict['DATE_BEG'] = start_dt + c_dict['DATE_END'] = end_dt # read jobs from STAT_ANALYSIS_JOB or legacy JOB_NAME/ARGS if unset c_dict['JOBS'] = self._read_jobs_from_config() @@ -669,30 +669,30 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): date_end = self.c_dict['DATE_END'] prefix = f"{fcst_or_obs}_{self.c_dict['DATE_TYPE'].lower()}" + # get YYYYMMDD of begin and end time + beg_ymd = datetime.strptime(date_beg.strftime('%Y%m%d'), '%Y%m%d') + end_ymd = datetime.strptime(date_end.strftime('%Y%m%d'), '%Y%m%d') + + # if hour list is provided, truncate begin and end time to YYYYMMDD + # and add first hour offset to begin time and last hour to end time if hour_list: - sub_dict[f'{prefix}_beg'] = ( - datetime.datetime.strptime(date_beg, '%Y%m%d') + hour_list[0] - ) - sub_dict[f'{prefix}_end'] = ( - datetime.datetime.strptime(date_end, '%Y%m%d') + hour_list[-1] - ) + sub_dict[f'{prefix}_beg'] = beg_ymd + hour_list[0] + sub_dict[f'{prefix}_end'] = end_ymd + hour_list[-1] if sub_dict[f'{prefix}_beg'] == sub_dict[f'{prefix}_end']: sub_dict[prefix] = sub_dict[f'{prefix}_beg'] return - # if fcst hour list is not set, use date beg 000000-235959 as - # fcst_{date_type}_beg/end - sub_dict[f'{prefix}_beg'] = ( - datetime.datetime.strptime( - date_beg + '000000', '%Y%m%d%H%M%S' - ) - ) - sub_dict[f'{prefix}_end'] = ( - datetime.datetime.strptime( - date_end + '235959', '%Y%m%d%H%M%S' - ) - ) + sub_dict[f'{prefix}_beg'] = date_beg + + # if end time is only YYYYMMDD, set HHMMSS to 23:59:59 + # otherwise use HHMMSS from end time + if date_end == end_ymd: + sub_dict[f'{prefix}_end'] = end_ymd + relativedelta(hours=+23, + minutes=+59, + seconds=+59) + else: + sub_dict[f'{prefix}_end'] = date_end @staticmethod def _set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list, @@ -915,15 +915,26 @@ def 
_format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, date_beg = self.c_dict['DATE_BEG'] date_end = self.c_dict['DATE_END'] + ymd = '%Y%m%d' + ymd_hms = '%Y%m%d_%H%M%S' + + # get YYYYMMDD of begin and end time + beg_ymd = date_beg.strftime(ymd) + end_ymd = date_end.strftime(ymd) + prefix = f'{fcst_or_obs}_{init_or_valid}' hour_list = config_dict[f'{prefix}_HOUR'].split(', ') # if hour list is not set if not hour_list or (len(hour_list) == 1 and hour_list == ['']): if date_type == init_or_valid: - config_dict[f'{prefix}_BEG'] = f'{date_beg}_000000' - config_dict[f'{prefix}_END'] = f'{date_end}_235959' + config_dict[f'{prefix}_BEG'] = date_beg.strftime(ymd_hms) + # if end time is only YYYYMMDD, set HHHMMSS to 23:59:59 + if date_end == datetime.strptime(end_ymd, ymd): + config_dict[f'{prefix}_END'] = f'{end_ymd}_235959' + else: + config_dict[f'{prefix}_END'] = date_end.strftime(ymd_hms) return # if multiple hours are specified @@ -931,8 +942,8 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, if date_type == init_or_valid: hour_beg = hour_list[0].replace('"', '') hour_end = hour_list[-1].replace('"', '') - config_dict[f'{prefix}_BEG'] = str(date_beg)+'_'+hour_beg - config_dict[f'{prefix}_END'] = str(date_end)+'_'+hour_end + config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_beg}' + config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_end}' return @@ -940,8 +951,8 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, hour_now = hour_list[0].replace('"', '') config_dict[f'{prefix}_HOUR'] = '"'+hour_now+'"' if date_type == init_or_valid: - config_dict[f'{prefix}_BEG'] = str(date_beg)+'_'+hour_now - config_dict[f'{prefix}_END'] = str(date_end)+'_'+hour_now + config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_now}' + config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_now}' def parse_model_info(self): """! Parse for model information. 
From f25fa86f347f1fa81dee99300fc88c2b51aa1108 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:23:17 -0600 Subject: [PATCH 52/92] removed logic to build filename template if not specified by the user since it is not used, users should specify the template they want explicitly --- .../stat_analysis/test_stat_analysis.py | 22 +++---- metplus/wrappers/stat_analysis_wrapper.py | 57 ++----------------- 2 files changed, 11 insertions(+), 68 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index e603fca6d1..f680212e18 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -480,29 +480,25 @@ def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, @pytest.mark.parametrize( - 'filename_template, output_type, filename_type,expected_output', [ + 'filename_template, output_type, expected_output', [ (('{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/' '{model?fmt=%s}_{valid?fmt=%Y%m%d}.stat'), - 'dump_row', 'user', '00Z/MODEL_TEST/MODEL_TEST_20190101.stat'), + 'dump_row', '00Z/MODEL_TEST/MODEL_TEST_20190101.stat'), (('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}_' 'fcstvalidhour000000Z_dump_row.stat'), - 'dump_row', 'user', ('MODEL_TEST_MODEL_TEST_ANL_valid20190101_' - 'fcstvalidhour000000Z_dump_row.stat') + 'dump_row', ('MODEL_TEST_MODEL_TEST_ANL_valid20190101_' + 'fcstvalidhour000000Z_dump_row.stat') ), (('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}' '{valid_hour?fmt=%H}_init{fcst_init_hour?fmt=%s}.stat'), - 'out_stat', 'user', ('MODEL_TEST_MODEL_TEST_ANL_valid2019010100' - '_init000000_060000_120000_180000.stat') - ), - ('{model?fmt=%s}_{obtype?fmt=%s}', - 'out_stat', 'default', ('MODEL_TEST_MODEL_TEST_ANLvalid20190101' - 
'_fcstvalidhour000000Z_out_stat.stat') + 'out_stat', ('MODEL_TEST_MODEL_TEST_ANL_valid2019010100' + '_init000000_060000_120000_180000.stat') ), ] ) @pytest.mark.wrapper_d def test_get_output_filename(metplus_config, filename_template, output_type, - filename_type, expected_output): + expected_output): # Independently test the building of # the output file name # using string template substitution @@ -517,12 +513,8 @@ def test_get_output_filename(metplus_config, filename_template, output_type, st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] - test_output_filename = st.get_output_filename(output_type, filename_template, - filename_type, - lists_to_loop, config_dict) assert expected_output == test_output_filename diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index aab51aa933..a321001893 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -797,64 +797,19 @@ def _set_strinsub_other_item(sub_dict, date_type, fcst_or_obs, hour_list): sub_dict[f'{date_prefix}_end'] - min_lead ) - def get_output_filename(self, output_type, filename_template, - filename_type, lists_to_loop, config_dict): + def get_output_filename(self, output_type, filename_template, config_dict): """! Create a file name for stat_analysis output. @param output_type string for the type of stat_analysis output, either - dump_row, out_stat, or output. Only used if filename_type is default. + dump_row, out_stat, or output. @param filename_template string of the template to create the file - name. Info from the loop list items are appended to the template if - filename_type is default. - @param filename_type string of the source of the template being used, - either default or user. - @param lists_to_loop list of all the list names whose items are being - grouped together + name. 
@param config_dict dictionary containing the configuration information @returns string of the filled file name template """ - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] - date_type = self.c_dict['DATE_TYPE'] - stringsub_dict = self.build_stringsub_dict(config_dict) - - # if template was not specified by user, build template from values - if filename_type == 'default': - - if date_beg == date_end: - filename_template = ( - filename_template+date_type.lower()+date_beg - ) - else: - filename_template = ( - filename_template+date_type.lower()+ - date_beg+'to'+date_end - ) - for loop_list in lists_to_loop: - # don't format MODEL because it is already in default template - if loop_list == 'MODEL_LIST': - continue - - list_name = loop_list.replace('_LIST', '') - filename_template += ( - f"_{list_name.replace('_', '').lower()}" - ) - # if add value without formatting if not HOUR variable - if 'HOUR' not in list_name: - filename_template += ( - config_dict[list_name].replace('"', '') - ) - continue - - # get first hour in MET time fmt (HHMMSS) and add with Z - value = self._get_met_time_list(config_dict[list_name])[0] - filename_template += value+'Z' - - filename_template += '_' + output_type + '.stat' - self.logger.debug(f"Building {output_type} filename from " - f"{filename_type} template: {filename_template}") + f"template: {filename_template}") output_filename = do_string_sub(filename_template, **stringsub_dict) @@ -1043,8 +998,6 @@ def process_job_args(self, job_type, job, model_info, output_filename = ( self.get_output_filename(job_type, output_template, - filename_type, - lists_to_loop_items, runtime_settings_dict) ) output_file = os.path.join(self.c_dict['OUTPUT_DIR'], @@ -1085,8 +1038,6 @@ def get_all_runtime_settings(self): output_filename = ( self.get_output_filename('output', self.c_dict['OUTPUT_TEMPLATE'], - 'user', - self.c_dict['LOOP_LIST_ITEMS'], runtime_settings) ) output_file = os.path.join(self.c_dict['OUTPUT_DIR'], From 
39bc1a1e9581595b28f4dd9bd4adefba129998f4 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:24:52 -0600 Subject: [PATCH 53/92] remove run_at_time method since it will no longer be called since LOOP_ORDER = times logic no longer exists. keeping stub of function because some of the logic can be used if/when StatAnalysis becomes a RuntimeFreq wrapper to be able to specify different time frequencies to run. It will always process the full range of times specified for now --- metplus/wrappers/stat_analysis_wrapper.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index a321001893..4ddc10f5ff 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -216,12 +216,12 @@ def run_all_times(self): self.run_stat_analysis() return self.all_commands - def run_at_time(self, input_dict): - loop_by = self.c_dict['DATE_TYPE'] - run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') - self.c_dict['DATE_BEG'] = run_date - self.c_dict['DATE_END'] = run_date - self.run_stat_analysis() + # def run_at_time(self, input_dict): + # loop_by = self.c_dict['DATE_TYPE'] + # run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') + # self.c_dict['DATE_BEG'] = run_date + # self.c_dict['DATE_END'] = run_date + # self.run_stat_analysis() def _read_jobs_from_config(self): jobs = [] From 49b7a2ff8afb0baa13bcca088cc4d0e98243d621 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:29:09 -0600 Subject: [PATCH 54/92] moved variables to store datetime notation to set YYYYMMDD and YYYYMMDD_HHMMSS into constant variable since it is referenced frequently, ci-run-diff --- metplus/util/constants.py | 6 ++++++ metplus/wrappers/stat_analysis_wrapper.py | 20 +++++++++----------- 2 files changed, 15 
insertions(+), 11 deletions(-) diff --git a/metplus/util/constants.py b/metplus/util/constants.py index e73408a2f1..e56f9def51 100644 --- a/metplus/util/constants.py +++ b/metplus/util/constants.py @@ -102,3 +102,9 @@ 'SCRUB_STAGING_DIR', 'MET_BIN_DIR', ] + +# datetime year month day (YYYYMMDD) notation +YMD = '%Y%m%d' + +# datetime year month day hour minute second (YYYYMMDD_HHMMSS) notation +YMD_HMS = '%Y%m%d_%H%M%S' diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 4ddc10f5ff..6c68bc839e 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -23,6 +23,7 @@ from ..util import get_start_and_end_times from ..util import time_string_to_met_time, get_relativedelta from ..util import ti_get_seconds_from_relativedelta +from ..util import YMD, YMD_HMS from . import CommandBuilder class StatAnalysisWrapper(CommandBuilder): @@ -218,7 +219,7 @@ def run_all_times(self): # def run_at_time(self, input_dict): # loop_by = self.c_dict['DATE_TYPE'] - # run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') + # run_date = input_dict[loop_by.lower()].strftime(YMD) # self.c_dict['DATE_BEG'] = run_date # self.c_dict['DATE_END'] = run_date # self.run_stat_analysis() @@ -670,8 +671,8 @@ def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list): prefix = f"{fcst_or_obs}_{self.c_dict['DATE_TYPE'].lower()}" # get YYYYMMDD of begin and end time - beg_ymd = datetime.strptime(date_beg.strftime('%Y%m%d'), '%Y%m%d') - end_ymd = datetime.strptime(date_end.strftime('%Y%m%d'), '%Y%m%d') + beg_ymd = datetime.strptime(date_beg.strftime(YMD), YMD) + end_ymd = datetime.strptime(date_end.strftime(YMD), YMD) # if hour list is provided, truncate begin and end time to YYYYMMDD # and add first hour offset to begin time and last hour to end time @@ -870,12 +871,9 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, date_beg = self.c_dict['DATE_BEG'] date_end = 
self.c_dict['DATE_END'] - ymd = '%Y%m%d' - ymd_hms = '%Y%m%d_%H%M%S' - # get YYYYMMDD of begin and end time - beg_ymd = date_beg.strftime(ymd) - end_ymd = date_end.strftime(ymd) + beg_ymd = date_beg.strftime(YMD) + end_ymd = date_end.strftime(YMD) prefix = f'{fcst_or_obs}_{init_or_valid}' hour_list = config_dict[f'{prefix}_HOUR'].split(', ') @@ -883,13 +881,13 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, # if hour list is not set if not hour_list or (len(hour_list) == 1 and hour_list == ['']): if date_type == init_or_valid: - config_dict[f'{prefix}_BEG'] = date_beg.strftime(ymd_hms) + config_dict[f'{prefix}_BEG'] = date_beg.strftime(YMD_HMS) # if end time is only YYYYMMDD, set HHHMMSS to 23:59:59 - if date_end == datetime.strptime(end_ymd, ymd): + if date_end == datetime.strptime(end_ymd, YMD): config_dict[f'{prefix}_END'] = f'{end_ymd}_235959' else: - config_dict[f'{prefix}_END'] = date_end.strftime(ymd_hms) + config_dict[f'{prefix}_END'] = date_end.strftime(YMD_HMS) return # if multiple hours are specified From 75375b9bde092782681479b1532aaf16b8370f77 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 11:24:14 -0600 Subject: [PATCH 55/92] Removed rest of logic that reads filename type since we now require to set a filename template for [dump_row_file] and [out_stat_file] instead of generating a default template. Add error checking to ensure that at least 1 model is specified with MODEL* variables and the corresponding filename template variable is set if [dump_row_file] or [out_stat_file] are found in any jobs. 
Added unit tests to ensure init set catches these cases to alert users of errors before attempting to run --- .../stat_analysis/test_stat_analysis.py | 75 ++++++++++++++++++- metplus/wrappers/stat_analysis_wrapper.py | 63 +++++++++------- 2 files changed, 106 insertions(+), 32 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index f680212e18..5022979c9f 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -59,6 +59,73 @@ def _set_config_dict_values(): return config_dict +def set_minimum_config_settings(config): + # set config variables to prevent command from running and bypass check + # if input files actually exist + config.set('config', 'DO_NOT_RUN_EXE', True) + config.set('config', 'INPUT_MUST_EXIST', False) + + # set process and time config variables + config.set('config', 'PROCESS_LIST', 'StatAnalysis') + config.set('config', 'LOOP_BY', 'INIT') + config.set('config', 'INIT_TIME_FMT', '%Y%m%d') + config.set('config', 'INIT_BEG', '20221014') + config.set('config', 'INIT_END', '20221014') + config.set('config', 'STAT_ANALYSIS_OUTPUT_DIR', + '{OUTPUT_BASE}/StatAnalysis/output') + config.set('config', 'STAT_ANALYSIS_OUTPUT_TEMPLATE', + '{valid?fmt=%Y%m%d%H}') + config.set('config', 'GROUP_LIST_ITEMS', 'DESC_LIST') + config.set('config', 'LOOP_LIST_ITEMS', 'MODEL_LIST') + config.set('config', 'MODEL_LIST', 'MODEL1') + config.set('config', 'STAT_ANALYSIS_JOB1', '-job filter') + config.set('config', 'MODEL1', 'MODEL_A') + config.set('config', 'MODEL1_OBTYPE', 'OBTYPE_A') + config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', '/some/lookin/dir') + + # not required, can be unset for certain tests + config.set('config', 'STAT_ANALYSIS_CONFIG_FILE', + '{PARM_BASE}/met_config/STATAnalysisConfig_wrapped') + + +@pytest.mark.parametrize( + 
'config_overrides, expected_result', [ + ({}, True), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]'}, + False), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template'}, + True), + ({'STAT_ANALYSIS_JOB1': '-job filter -out_stat [out_stat_file]'}, + False), + ({'STAT_ANALYSIS_JOB1': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE': 'some/template'}, + True), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'STAT_ANALYSIS_JOB2': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template'}, + False), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'STAT_ANALYSIS_JOB2': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template', + 'MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE': 'some/template'}, + True), + ] +) +@pytest.mark.wrapper_d +def test_check_required_job_template(metplus_config, config_overrides, + expected_result): + config = metplus_config() + set_minimum_config_settings(config) + for key, value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + print(wrapper.c_dict['JOBS']) + print(wrapper.c_dict['MODEL_INFO_LIST']) + assert wrapper.isOK == expected_result + + @pytest.mark.parametrize( 'c_dict, expected_result', [ # 0 @@ -713,10 +780,10 @@ def test_parse_model_info(metplus_config): assert test_model_info_list[0]['name'] == expected_name assert test_model_info_list[0]['reference_name'] == expected_reference_name assert test_model_info_list[0]['obtype'] == expected_obtype - assert test_model_info_list[0]['dump_row_filename_template'] == expected_dump_row_filename_template - assert test_model_info_list[0]['dump_row_filename_type'] == expected_dump_row_filename_type - assert test_model_info_list[0]['out_stat_filename_template'] == expected_out_stat_filename_template 
- assert test_model_info_list[0]['out_stat_filename_type'] == expected_out_stat_filename_type + assert (test_model_info_list[0]['dump_row_filename_template'] == + expected_dump_row_filename_template) + assert (test_model_info_list[0]['out_stat_filename_template'] + == expected_out_stat_filename_template) @pytest.mark.wrapper_d diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 6c68bc839e..051b3f31e1 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -235,11 +235,15 @@ def _read_jobs_from_config(self): if job_indices: for j_id in job_indices: job = self.config.getraw('config', f'STAT_ANALYSIS_JOB{j_id}') - jobs.append(job) - else: + if job: + jobs.append(job) + + # if not jobs found, check for old _JOB_NAME and _JOB_ARGS variables + if not jobs: job_name = self.config.getraw('config', 'STAT_ANALYSIS_JOB_NAME') job_args = self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS') - jobs.append(f'-job {job_name} {job_args}') + if job_name and job_args: + jobs.append(f'-job {job_name} {job_args}') return jobs @@ -265,6 +269,22 @@ def c_dict_error_check(self, c_dict, all_field_lists_empty): self.log_error( "Must set at least one job with STAT_ANALYSIS_JOB" ) + else: + # check if [dump_row_file] or [out_stat_file] are in any job + for job in c_dict['JOBS']: + for check in ('dump_row_file', 'out_stat_file'): + if f'[{check}]' not in job: + continue + for model in c_dict['MODEL_INFO_LIST']: + if model[f'{check}name_template']: + continue + conf = check.replace('_file', '').upper() + conf = f"STAT_ANALYSIS_{conf}_TEMPLATE" + self.log_error(f'Must set {conf} if [{check}] is used' + ' in a job') + # error if they are found but their templates are not set + + for conf_list in self.LIST_CATEGORIES: if not c_dict[conf_list]: @@ -933,7 +953,7 @@ def parse_model_info(self): f"set if MODEL{m} is set.") return None, None - model_obtype = self.config.getstr('config', 
f'MODEL{m}_OBTYPE', '') + model_obtype = self.config.getraw('config', f'MODEL{m}_OBTYPE') if not model_obtype: self.log_error(f"MODEL{m}_OBTYPE must be set " f"if MODEL{m} is set.") @@ -952,22 +972,10 @@ def parse_model_info(self): self.config.getraw('config', var_name) ) - if not model_filename_template: - model_filename_template = '{model?fmt=%s}_{obtype?fmt=%s}_' - model_filename_type = 'default' - else: - model_filename_type = 'user' - if output_type == 'DUMP_ROW': - model_dump_row_filename_template = ( - model_filename_template - ) - model_dump_row_filename_type = model_filename_type + model_dump_row_filename_template = model_filename_template elif output_type == 'OUT_STAT': - model_out_stat_filename_template = ( - model_filename_template - ) - model_out_stat_filename_type = model_filename_type + model_out_stat_filename_template = model_filename_template mod = { 'name': model_name, @@ -975,23 +983,23 @@ def parse_model_info(self): 'dir': model_dir, 'obtype': model_obtype, 'dump_row_filename_template': model_dump_row_filename_template, - 'dump_row_filename_type': model_dump_row_filename_type, 'out_stat_filename_template': model_out_stat_filename_template, - 'out_stat_filename_type': model_out_stat_filename_type, } model_info_list.append(mod) + if not model_info_list: + self.log_error('At least one set of model information must be ' + 'set using MODEL, MODEL_OBTYPE, and ' + 'MODEL_STAT_ANALYSIS_LOOKIN_DIR') + return model_info_list def process_job_args(self, job_type, job, model_info, - lists_to_loop_items, runtime_settings_dict): + runtime_settings_dict): output_template = ( model_info[f'{job_type}_filename_template'] ) - filename_type = ( - model_info[f'{job_type}_filename_type'] - ) output_filename = ( self.get_output_filename(job_type, @@ -1027,8 +1035,7 @@ def get_all_runtime_settings(self): return None runtime_settings['JOBS'] = ( - self.get_job_info(model_info, runtime_settings, - self.c_dict['LOOP_LIST_ITEMS']) + self.get_job_info(model_info, 
runtime_settings) ) # get -out argument if set @@ -1276,7 +1283,7 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): # return last model info dict used return model_info - def get_job_info(self, model_info, runtime_settings_dict, loop_lists): + def get_job_info(self, model_info, runtime_settings_dict): """! Get job information and concatenate values into a string @params model_info model info to use to determine output file paths @@ -1293,7 +1300,7 @@ def get_job_info(self, model_info, runtime_settings_dict, loop_lists): continue job = self.process_job_args(job_type, job, model_info, - loop_lists, runtime_settings_dict) + runtime_settings_dict) # substitute filename templates that may be found in rest of job job = do_string_sub(job, **stringsub_dict) From d0cc48c3c58e552c17fe4601b4dbe54c25855f85 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 12:14:30 -0600 Subject: [PATCH 56/92] improved list to string logic by handling if quotes already exist when attempting to add them to each item --- .../tests/pytests/util/string_manip/test_util_string_manip.py | 2 ++ metplus/util/string_manip.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index 541733e436..7a1f2f7992 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -158,6 +158,8 @@ def test_getlist_begin_end_incr(list_string, output_list): (['0', '1', '2'], True, '"0", "1", "2"'), (['a', 'b', 'c'], False, 'a, b, c'), (['0', '1', '2'], False, '0, 1, 2'), + (['"a"', '"b"', '"c"'], True, '"a", "b", "c"'), + (['"0"', '"1"', '"2"'], True, '"0", "1", "2"'), ] ) @pytest.mark.util diff --git a/metplus/util/string_manip.py b/metplus/util/string_manip.py index 1064070d9b..61bc8e3e33 
100644 --- a/metplus/util/string_manip.py +++ b/metplus/util/string_manip.py @@ -205,6 +205,8 @@ def list_to_str(list_of_values, add_quotes=True): return '' if add_quotes: - return '"' + '", "'.join(list_of_values) + '"' + # remove any quotes that are already around items, then add quotes + values = [remove_quotes(item) for item in list_of_values] + return '"' + '", "'.join(values) + '"' return ', '.join(list_of_values) From 3e8b832806a5222368a59f9faee48bc284f67917 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 12:23:01 -0600 Subject: [PATCH 57/92] made MODEL_OBTYPE optional so users can filter without obtype, remove MODEL_REFERENCE_NAME because it was only used by MakePlots wrapper --- docs/Users_Guide/glossary.rst | 12 +------ docs/Users_Guide/wrappers.rst | 3 +- .../stat_analysis/test_stat_analysis.py | 7 ++-- metplus/wrappers/stat_analysis_wrapper.py | 34 +++++++------------ 4 files changed, 17 insertions(+), 39 deletions(-) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index f612dfe15e..f1f29f4fb9 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -2421,18 +2421,8 @@ METplus Configuration Glossary | *Used by:* StatAnalysis - MODEL_NAME_ON_PLOT - .. warning:: **DEPRECATED:** Please use :term:`MODEL_REFERENCE_NAME` instead. - MODEL_REFERENCE_NAME - Define the name the first model will be listed as on the plots. There can be number of models defined in configuration files, simply increment the "MODEL1" string to match the total number of models being used, e.g.: - - | MODEL1_REFERENCE_NAME - | MODEL2_REFERENCE_NAME - | ... - | MODELN_REFERENCE_NAME - - | *Used by:* StatAnalysis + .. warning:: **DEPRECATED:** No longer used. MODEL_OBS_NAME .. warning:: **DEPRECATED:** Please use :term:`MODEL_OBTYPE` instead. 
diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 7c2f2cf6de..1ef52a174b 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6789,7 +6789,6 @@ The following values **must** be defined in the METplus configuration file: | :term:`MODEL\` | :term:`MODEL_OBTYPE` | :term:`MODEL_STAT_ANALYSIS_LOOKIN_DIR` -| :term:`MODEL_REFERENCE_NAME` | :term:`GROUP_LIST_ITEMS` | :term:`LOOP_LIST_ITEMS` | :term:`MODEL_LIST` @@ -6798,7 +6797,6 @@ The following values **must** be defined in the METplus configuration file: | :term:`LINE_TYPE_LIST` | :term:`STAT_ANALYSIS_JOB_NAME` | :term:`STAT_ANALYSIS_JOB_ARGS` -| :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES` | The following values are optional in the METplus configuration file: @@ -6820,6 +6818,7 @@ The following values are optional in the METplus configuration file: | :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` +| :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES` | .. 
warning:: **DEPRECATED:** diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 5022979c9f..fe11d3b22f 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -80,7 +80,6 @@ def set_minimum_config_settings(config): config.set('config', 'MODEL_LIST', 'MODEL1') config.set('config', 'STAT_ANALYSIS_JOB1', '-job filter') config.set('config', 'MODEL1', 'MODEL_A') - config.set('config', 'MODEL1_OBTYPE', 'OBTYPE_A') config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', '/some/lookin/dir') # not required, can be unset for certain tests @@ -760,9 +759,8 @@ def test_parse_model_info(metplus_config): # are as expected st = stat_analysis_wrapper(metplus_config) # Test 1 - expected_name = 'MODEL_TEST' - expected_reference_name = 'MODELTEST' - expected_obtype = 'MODEL_TEST_ANL' + expected_name = '"MODEL_TEST"' + expected_obtype = '"MODEL_TEST_ANL"' expected_dump_row_filename_template = ( '{fcst_valid_hour?fmt=%H}Z/MODEL_TEST/' +'MODEL_TEST_{valid?fmt=%Y%m%d}.stat' @@ -778,7 +776,6 @@ def test_parse_model_info(metplus_config): expected_out_stat_filename_type = 'user' test_model_info_list = st.parse_model_info() assert test_model_info_list[0]['name'] == expected_name - assert test_model_info_list[0]['reference_name'] == expected_reference_name assert test_model_info_list[0]['obtype'] == expected_obtype assert (test_model_info_list[0]['dump_row_filename_template'] == expected_dump_row_filename_template) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 051b3f31e1..c33342d82b 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -940,10 +940,10 @@ def parse_model_info(self): ) for m in model_indices: model_name = self.config.getraw('config', f'MODEL{m}') - 
model_reference_name = ( - self.config.getraw('config', f'MODEL{m}_REFERENCE_NAME', - model_name) - ) + + # add quotes to model name if a value is set + model_name = f'"{model_name}"' if model_name else '' + model_dir = ( self.config.getraw('config', f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR') @@ -954,10 +954,7 @@ def parse_model_info(self): return None, None model_obtype = self.config.getraw('config', f'MODEL{m}_OBTYPE') - if not model_obtype: - self.log_error(f"MODEL{m}_OBTYPE must be set " - f"if MODEL{m} is set.") - return None, None + model_obtype = f'"{model_obtype}"' if model_obtype else '' for output_type in ['DUMP_ROW', 'OUT_STAT']: var_name = f'STAT_ANALYSIS_{output_type}_TEMPLATE' @@ -979,7 +976,6 @@ def parse_model_info(self): mod = { 'name': model_name, - 'reference_name': model_reference_name, 'dir': model_dir, 'obtype': model_obtype, 'dump_row_filename_template': model_dump_row_filename_template, @@ -1236,28 +1232,25 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): """ lookin_dirs = [] model_list = [] - ref_list = [] obtype_list = [] dump_row_filename_list = [] # get list of models to process - models_to_run = [ - model.strip().replace('"', '') - for model in runtime_settings_dict['MODEL'].split(',') - ] + models_to_run = runtime_settings_dict['MODEL'].split(',') for model_info in self.c_dict['MODEL_INFO_LIST']: # skip model if not in list of models to process if model_info['name'] not in models_to_run: continue model_list.append(model_info['name']) - ref_list.append(model_info['reference_name']) - obtype_list.append(model_info['obtype']) + if model_info['obtype']: + obtype_list.append(model_info['obtype']) dump_row_filename_list.append( model_info['dump_row_filename_template'] ) + # set MODEL and OBTYPE to single item to find lookin dir - runtime_settings_dict['MODEL'] = f'"{model_info["name"]}"' - runtime_settings_dict['OBTYPE'] = f'"{model_info["obtype"]}"' + runtime_settings_dict['MODEL'] = model_info["name"] + 
runtime_settings_dict['OBTYPE'] = model_info["obtype"] lookin_dirs.append( self.get_lookin_dir(model_info['dir'], runtime_settings_dict) @@ -1271,13 +1264,12 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): self.log_error("No value found for lookin dir") return None - if not model_list or not obtype_list: - self.log_error("Could not find model or obtype to process") + if not model_list: + self.log_error("Could not find model to process") return None # set values in runtime settings dict for model and obtype runtime_settings_dict['MODEL'] = list_to_str(model_list) - runtime_settings_dict['MODEL_REFERENCE_NAME'] = list_to_str(ref_list) runtime_settings_dict['OBTYPE'] = list_to_str(obtype_list) # return last model info dict used From acc04d3569abb429f23d397ec7f68d39bb254c6a Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:20:20 -0600 Subject: [PATCH 58/92] exit non-zero and print an error if any warnings/errors occur when running build_docs.py script locally to easily see if there are any fixes needed to the docs --- docs/build_docs.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/build_docs.py b/docs/build_docs.py index 723d5c25bc..444d9bff7b 100755 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -160,6 +160,10 @@ def main(): if os.stat(warning_file).st_size == 0: print(f"No warnings found, removing {warning_file}") os.remove(warning_file) + else: + print('ERROR: Doc build contains warnings or errors. 
' + f'Please review {warning_file}') + sys.exit(1) print("Documentation build completed") From 9c44e40ab48d5e35e37ada5e11e2587eda69e36f Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:21:29 -0600 Subject: [PATCH 59/92] added glossary entry for STAT_ANALYSIS_JOB variable and cleaned up list of required and optional config variables for StatAnalysis wrapper --- docs/Users_Guide/glossary.rst | 19 ++++++++++------- docs/Users_Guide/wrappers.rst | 39 +++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index f1f29f4fb9..348d34ce89 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -2198,18 +2198,23 @@ METplus Configuration Glossary JOB_ARGS .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB_ARGS` instead. - STAT_ANALYSIS_JOB_ARGS - Specify stat_analysis job arguments to run. The job arguments that are to be run with the corresponding :term:`STAT_ANALYSIS_JOB_NAME`. If using -dump_row, use -dump_row [dump_row_filename]. If using -out_stat, -out_stat [out_stat_filename]. For more information on these job arguments, please see the `MET User's Guide `_. + STAT_ANALYSIS_JOB + Specify StatAnalysis job arguments to run. Include the full set of job + arguments including the -job argument. Multiple jobs can be defined by + with STAT_ANALYSIS_JOB1, STAT_ANALYSIS_JOB2, etc. + Filename template tags can be used to insert values from a given run into + the job arguments. The keywords [dump_row_file] and [out_stat_file] can + be used and will be substituted with values from + :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` and + :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` respectively. | *Used by:* StatAnalysis - JOB_NAME - .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB_NAME` instead. + STAT_ANALYSIS_JOB_ARGS + .. 
warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB\` instead. STAT_ANALYSIS_JOB_NAME - Specify stat_analysis job name to run. Valid options are filter, summary, aggregate, aggregate_stat, go_index, and ramp. For more information on these job names and what they do, please see the `MET User's Guide `_. - - | *Used by:* StatAnalysis + .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB\` instead. EXTRACT_TILES_LAT_ADJ Specify a latitude adjustment, in degrees to be used in the analysis. In the ExtractTiles wrapper, this corresponds to the 2m portion of the 2n x 2m subregion tile. diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 1ef52a174b..51ddfef591 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6784,42 +6784,47 @@ METplus Configuration The following values **must** be defined in the METplus configuration file: +| :term:`STAT_ANALYSIS_JOB\` | :term:`STAT_ANALYSIS_OUTPUT_DIR` -| :term:`LOG_STAT_ANALYSIS_VERBOSITY` | :term:`MODEL\` -| :term:`MODEL_OBTYPE` | :term:`MODEL_STAT_ANALYSIS_LOOKIN_DIR` | :term:`GROUP_LIST_ITEMS` | :term:`LOOP_LIST_ITEMS` -| :term:`MODEL_LIST` -| :term:`VX_MASK_LIST` -| :term:`FCST_LEAD_LIST` -| :term:`LINE_TYPE_LIST` -| :term:`STAT_ANALYSIS_JOB_NAME` -| :term:`STAT_ANALYSIS_JOB_ARGS` -| The following values are optional in the METplus configuration file: | :term:`STAT_ANALYSIS_CONFIG_FILE` +| :term:`LOG_STAT_ANALYSIS_VERBOSITY` +| :term:`MODEL_OBTYPE` | :term:`VAR_FOURIER_DECOMP` | :term:`VAR_WAVE_NUM_LIST` +| :term:`MODEL_LIST` +| :term:`DESC_LIST` +| :term:`FCST_LEAD_LIST` +| :term:`OBS_LEAD_LIST` | :term:`FCST_VALID_HOUR_LIST` -| :term:`OBS_VALID_HOUR_LIST` | :term:`FCST_INIT_HOUR_LIST` +| :term:`OBS_VALID_HOUR_LIST` | :term:`OBS_INIT_HOUR_LIST` -| :term:`OBS_LEAD_LIST` -| :term:`DESC_LIST` +| :term:`FCST_VAR_LIST` +| :term:`OBS_VAR_LIST` +| :term:`FCST_UNITS_LIST` +| :term:`OBS_UNITS_LIST` +| :term:`FCST_LEVEL_LIST` +| :term:`OBS_LEVEL_LIST` +| 
:term:`VX_MASK_LIST` | :term:`INTERP_MTHD_LIST` | :term:`INTERP_PNTS_LIST` +| :term:`FCST_THRESH_LIST` +| :term:`OBS_THRESH_LIST` | :term:`COV_THRESH_LIST` | :term:`ALPHA_LIST` +| :term:`LINE_TYPE_LIST` | :term:`STAT_ANALYSIS_HSS_EC_VALUE` | :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` | :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES` -| .. warning:: **DEPRECATED:** @@ -6835,7 +6840,7 @@ The following values are optional in the METplus configuration file: | :term:`INIT_HOUR_INCREMENT` | :term:`MODEL` | :term:`OBTYPE` - | :term:`JOB_NAME` + | JOB_NAME | :term:`JOB_ARGS` | :term:`FCST_LEAD` | :term:`FCST_VAR_NAME` @@ -6853,11 +6858,13 @@ The following values are optional in the METplus configuration file: | :term:`PLOT_TIME` | :term:`MODEL_NAME` | :term:`MODEL_OBS_NAME` - | :term:`MODEL_NAME_ON_PLOT` + | MODEL_NAME_ON_PLOT | :term:`MODEL_STAT_DIR` | :term:`REGION_LIST` | :term:`LEAD_LIST` - | + | :term:`STAT_ANALYSIS_JOB_NAME` + | :term:`STAT_ANALYSIS_JOB_ARGS` + .. _stat-analysis-met-conf: From 926d231fc6e5017feb85dec36a0419dc42ee5fc0 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:27:11 -0600 Subject: [PATCH 60/92] added info about setting groups of fcst leads --- docs/Users_Guide/glossary.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index 348d34ce89..366443177d 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -2236,7 +2236,11 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`FCST_LEAD_LIST` instead. FCST_LEAD_LIST - Specify the values of the FSCT_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 + Specify the values of the FSCT_LEAD column in the MET .stat file to use. 
+ Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 + Groups of values can be looped over by setting FCST_LEAD_LIST and + adding FCST_LEAD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat_analysis_wrapper` Description for more information. | *Used by:* StatAnalysis From 867783f40706a8da9716e9485400c290b88f0843 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 14:11:06 -0600 Subject: [PATCH 61/92] added info specific to StatAnalysis wrapper its Description section and adding info to glossary items about looping over groups of list items --- docs/Users_Guide/glossary.rst | 109 +++++++++++++++++++++++++++++++--- docs/Users_Guide/wrappers.rst | 71 +++++++++++++++++++--- 2 files changed, 164 insertions(+), 16 deletions(-) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index 366443177d..df39a3c89c 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -875,13 +875,23 @@ METplus Configuration Glossary | *Used by:* TCMPRPlotter DESC_LIST - A single value or list of values used in the stat_analysis data stratification. Specifies the values of the DESC column in the MET .stat file to use. + A single value or list of values used in the stat_analysis data + stratification. + Specifies the values of the DESC column in the MET .stat file to use. + + Groups of values can be looped over by setting DESC_LIST and + adding DESC_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis ALPHA_LIST A single value or list of values used in the stat_analysis data stratification. Specifies the values of the ALPHA column in the MET .stat file to use. + Groups of values can be looped over by setting ALPHA_LIST and + adding ALPHA_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. 
+ | *Used by:* StatAnalysis DLAND_FILE @@ -1581,11 +1591,19 @@ METplus Configuration Glossary FCST_THRESH_LIST Specify the values of the FCST_THRESH column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_THRESH_LIST and + adding FCST_THRESH_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_THRESH_LIST Specify the values of the OBS_THRESH column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_THRESH_LIST and + adding OBS_THRESH_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_TILE_PREFIX @@ -1608,6 +1626,10 @@ METplus Configuration Glossary FCST_LEVEL_LIST Specify the values of the FCST_LEV column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_LEVEL_LIST and + adding FCST_LEVEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_VAR_NAME @@ -1616,11 +1638,19 @@ METplus Configuration Glossary FCST_VAR_LIST Specify the values of the FCST_VAR column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_VAR_LIST and + adding FCST_VAR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_UNITS_LIST Specify the values of the FCST_UNITS column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_UNITS_LIST and + adding FCST_UNITS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_VAR_LEVELS @@ -2130,12 +2160,22 @@ METplus Configuration Glossary | *Used by:* TCPairs FCST_INIT_HOUR_LIST - Specify a list of hours for initialization times of forecast files for use in the analysis. 
+ Specify a list of hours for initialization times of forecast files for + use in the analysis. + + Groups of values can be looped over by setting FCST_INIT_HOUR_LIST and + adding FCST_INIT_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis OBS_INIT_HOUR_LIST - Specify a list of hours for initialization times of observation files for use in the analysis. + Specify a list of hours for initialization times of observation files for + use in the analysis. + + Groups of values can be looped over by setting OBS_INIT_HOUR_LIST and + adding OBS_INIT_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis @@ -2178,7 +2218,12 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`INTERP_MTHD_LIST` instead. INTERP_MTHD_LIST - Specify the values of the INTERP_MTHD column in the MET .stat file to use; specify the interpolation used to create the MET .stat files. + Specify the values of the INTERP_MTHD column in the MET .stat file to use; + specify the interpolation used to create the MET .stat files. + + Groups of values can be looped over by setting INTERP_MTHD_LIST and + adding INTERP_MTHD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis @@ -2186,7 +2231,12 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`INTERP_PNTS_LIST` instead. INTERP_PNTS_LIST - Specify the values of the INTERP_PNTS column in the MET .stat file to use; corresponds to the interpolation in the MET .stat files. + Specify the values of the INTERP_PNTS column in the MET .stat file to use; + corresponds to the interpolation in the MET .stat files. + + Groups of values can be looped over by setting INTERP_PNTS_LIST and + adding INTERP_PNTS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. 
| *Used by:* StatAnalysis @@ -2238,15 +2288,20 @@ METplus Configuration Glossary FCST_LEAD_LIST Specify the values of the FSCT_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 + Groups of values can be looped over by setting FCST_LEAD_LIST and adding FCST_LEAD_LIST to :term:`LOOP_LIST_ITEMS`. - See :ref:`stat_analysis_wrapper` Description for more information. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis OBS_LEAD_LIST Specify the values of the OBS_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 + Groups of values can be looped over by setting OBS_LEAD_LIST and + adding OBS_LEAD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis LEAD_SEQ @@ -2289,6 +2344,10 @@ METplus Configuration Glossary LINE_TYPE_LIST Specify the MET STAT line types to be considered. + Groups of values can be looped over by setting LINE_TYPE_LIST and + adding LINE_TYPE_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis, TCMPRPlotter LOG_DIR @@ -2414,6 +2473,11 @@ METplus Configuration Glossary MODEL_LIST List of the specified the model names. + If this is left unset, then values from :term:`MODEL\` will be used. + + Groups of values can be looped over by setting MODEL_LIST and + adding MODEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis @@ -3167,6 +3231,10 @@ METplus Configuration Glossary OBS_LEVEL_LIST Specify the values of the OBS_LEV column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_LEVEL_LIST and + adding OBS_LEVEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. 
+ | *Used by:* StatAnalysis OBS_VAR_NAME @@ -3175,11 +3243,19 @@ METplus Configuration Glossary OBS_VAR_LIST Specify the values of the OBS_VAR column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_VAR_LIST and + adding OBS_VAR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_UNITS_LIST Specify the values of the OBS_UNITS column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_UNITS_LIST and + adding OBS_UNITS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_VAR_LEVELS @@ -3528,7 +3604,12 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`VX_MASK_LIST` instead. VX_MASK_LIST - Specify the values of the VX_MASK column in the MET .stat file to use; a list of the verification regions of interest. + Specify the values of the VX_MASK column in the MET .stat file to use; + a list of the verification regions of interest. + + Groups of values can be looped over by setting VX_MASK_LIST and + adding VX_MASK_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis @@ -4266,12 +4347,22 @@ METplus Configuration Glossary | *Used by:* All FCST_VALID_HOUR_LIST - Specify a list of hours for valid times of forecast files for use in the analysis. + Specify a list of hours for valid times of forecast files for use + in the analysis. + + Groups of values can be looped over by setting FCST_VALID_HOUR_LIST and + adding FCST_VALID_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis OBS_VALID_HOUR_LIST - Specify a list of hours for valid times of observation files for use in the analysis. + Specify a list of hours for valid times of observation files for use + in the analysis. 
+ + Groups of values can be looped over by setting OBS_VALID_HOUR_LIST and + adding OBS_VALID_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. | *Used by:* StatAnalysis diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 51ddfef591..bc15d65f0f 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6746,6 +6746,9 @@ The StatAnalysis wrapper encapsulates the behavior of the MET stat_analysis tool. It provides the infrastructure to summarize and filter the MET .stat files. +Timing +^^^^^^ + This wrapper is configured differently than many of the other wrappers that loop over multiple run times. The StatAnalysis wrapper is designed to process a range of run times at once using filtering to subset what is processed. @@ -6755,6 +6758,35 @@ The LEAD_SEQ variable that typically defines a list of forecast leads to process is not used by the wrapper. Instead the FCST_LEAD_LIST and OBS_LEAD_LIST are used to filter out forecast leads from the data. +Optional MET Configuration File +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The wrapped MET config file specified with :term:`STAT_ANALYSIS_CONFIG_FILE` is +optional in the StatAnalysis wrapper. Excluding this option will result in a +call to stat_analysis with the job arguments added via the command line. +Only 1 job can be defined in no wrapped MET configuration file is used. +To use a configuration file, set the following in the METplus config file:: + + STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped + +Jobs +^^^^ + +The job arguments can be defined by setting :term:`STAT_ANALYSIS_JOB\` +variables, e.g. STAT_ANALYSIS_JOB1. All of the job commands including the -job +argument are set here. +Prior to v5.0.0, the config variables STAT_ANALYSIS_JOB_NAME and +STAT_ANALYSIS_JOB_ARGS were used to set the value following the -job argument +and any other job arguments respectively. 
+ +Multiple jobs can be defined as of v5.0.0 using +STAT_ANALYSIS_JOB1, STAT_ANALYSIS_JOB2, etc. All jobs will be passed to each +call to stat_analysis. Only 1 job can be specified if no MET config file is +set with :term:`STAT_ANALYSIS_CONFIG_FILE`. + +Filtering with Lists +^^^^^^^^^^^^^^^^^^^^ + There are many configuration variables that end with \_LIST that control settings in the STATAnalysisConfig_wrapped file. For example, MODEL_LIST controls the model variable in the MET config file and @@ -6770,13 +6802,38 @@ or LOOP_LIST_ITEMS will be automatically added to GROUP_LIST_ITEMS. Lists defined in LOOP_LIST_ITEMS that are empty lists will be automatically moved to GROUP_LIST_ITEMS. -Output files: -dump_row, -out_stat, and -out - -Config file optional - -New in v5.0.0: Multiple jobs - -New in v5.0.0: Looping over groups of list items +.. _stat-analysis-looping-groups: + +Looping Over Groups of Lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New in v5.0.0 is the ability to define groups of list items that can be looped +over. For example, a user may want to process forecast leads 1-3 in a +single run, then process forecast leads 4-6 in the next. To accomplish this, +define each group of items in a separate config variable ending with a number. +Then add the name of the list (without the numbers) to LOOP_LIST_ITEMS:: + + [config] + FCST_LEAD_LIST1 = 1,2,3 + FCST_LEAD_LIST2 = 4,5,6 + LOOP_LIST_ITEMS = FCST_LEAD_LIST + +If FCST_LEAD_LIST was added to GROUP_LIST_ITEMS instead, then all 6 items +defined in the 2 lists will be combined and passed to the tool at once. + +Outputs +^^^^^^^ + +This wrapper can be configured to write 3 types of output files. +Output files specified with the -out command line argument can be defined by +setting :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` and optionally +:term:`STAT_ANALYSIS_OUTPUT_DIR`. +Output files specified with the -dump_row or -out_stat arguments must be +defined in a job using :term:`STAT_ANALYSIS_JOB\`. 
+The [dump_row_file] keyword can be added to a job after the -dump_row argument
+only if a :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` is set. Similarly,
+the [out_stat_file] keyword can be added to a job after the -out_stat argument
+only if a :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` is set.
 
 
 METplus Configuration

From fe3c2a0d8b968c204002af71209082abfad0d173 Mon Sep 17 00:00:00 2001
From: George McCabe <23407799+georgemccabe@users.noreply.github.com>
Date: Fri, 14 Oct 2022 14:39:03 -0600
Subject: [PATCH 62/92] added info about additional filename template tags that
 can be used in StatAnalysis wrapper

---
 docs/Users_Guide/wrappers.rst | 85 +++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)

diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst
index bc15d65f0f..6b5a09d6fb 100644
--- a/docs/Users_Guide/wrappers.rst
+++ b/docs/Users_Guide/wrappers.rst
@@ -6821,6 +6821,91 @@ Then add the name of the list (without the numbers) to LOOP_LIST_ITEMS::
 
 If FCST_LEAD_LIST was added to GROUP_LIST_ITEMS instead, then all 6 items
 defined in the 2 lists will be combined and passed to the tool at once.
 
+Additional Filename Template Tags
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The StatAnalysis wrapper supports additional tags that can be substituted into
+the input and output paths because the wrapper processes a range of time.
+ +The following filename template tags can be used: + +* model +* desc +* vx_mask +* interp_mthd +* interp_pnts +* cov_thresh +* alpha +* line_type +* fcst_var +* obs_var +* fcst_units +* obs_units +* fcst_thresh +* obs_thresh +* fcst_level +* obs_level +* fcst_valid_hour +* obs_valid_hour +* fcst_init_hour +* obs_init_hour +* fcst_lead +* obs_lead +* fcst_valid_hour_beg +* fcst_valid_hour_end +* obs_valid_hour_beg +* obs_valid_hour_end +* fcst_init_hour_beg +* fcst_init_hour_end +* obs_init_hour_beg +* obs_init_hour_end +* valid_hour +* valid_hour_beg +* valid_hour_end +* init_hour +* init_hour_beg +* init_hour_end +* fcst_valid +* fcst_valid_beg +* fcst_valid_end +* fcst_init +* fcst_init_beg +* fcst_init_end +* obs_valid +* obs_valid_beg +* obs_valid_end +* obs_init +* obs_init_beg +* obs_init_end +* valid +* valid_beg +* valid_end +* init +* init_beg +* init_end +* fcst_lead +* fcst_lead_hour +* fcst_lead_min +* fcst_lead_sec +* fcst_lead_totalsec +* obs_lead +* obs_lead_hour +* obs_lead_min +* obs_lead_sec +* obs_lead_totalsec +* lead +* lead_hour +* lead_min +* lead_sec +* lead_totalsec + +Please note that some of these items will be set to an empty string depending +on the configuration. For example, lead_hour, lead_min, lead_sec, and +lead_totalsec cannot be computed if there are multiple leads being processed +in a given run. Another example, if fcst_valid_beg has the same value as +fcst_valid_end, then fcst_valid will be set to the same value, otherwise it +will be left as an empty string. 
+
 Outputs
 ^^^^^^^

From 6b30b6b7f1db3218ba2370252885776bc91e199f Mon Sep 17 00:00:00 2001
From: George McCabe <23407799+georgemccabe@users.noreply.github.com>
Date: Fri, 14 Oct 2022 14:50:36 -0600
Subject: [PATCH 63/92] added info to note that full day of end time will be
 processed if only YYYYMMDD is specified in VALID/INIT_END

---
 docs/Users_Guide/wrappers.rst | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst
index 6b5a09d6fb..596679357b 100644
--- a/docs/Users_Guide/wrappers.rst
+++ b/docs/Users_Guide/wrappers.rst
@@ -6754,7 +6754,16 @@ loop over multiple run times. The StatAnalysis wrapper is designed to process
 a range of run times at once using filtering to subset what is processed.
 The VALID_BEG and VALID_END or INIT_BEG and INIT_END variables are used to
 calculate filtering criteria.
-The LEAD_SEQ variable that typically defines a list of forecast leads to
+
+Prior to v5.0.0, only the year, month, and day (YYYYMMDD) of the init/valid
+begin and end times were read by the wrapper. The hours, minutes, and seconds
+were ignored to be filtered using FCST_HOUR_LIST and OBS_HOUR_LIST.
+Now the full time information is read to enable users to process a more
+specific range of time. To preserve the original behavior, end times that
+do not include hours, minutes, or seconds will process up to 23:59:59 on that
+day unless specific hours are defined with FCST_HOUR_LIST or OBS_HOUR_LIST.
+
+Note: The LEAD_SEQ variable that typically defines a list of forecast leads to
 process is not used by the wrapper. Instead the FCST_LEAD_LIST and
 OBS_LEAD_LIST are used to filter out forecast leads from the data.
 
From 685d25e374a8aa72ae132ddfd7276929daab5ada Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 15:14:45 -0600 Subject: [PATCH 64/92] Per #1687, removed LOOP_ORDER from all config files --- .../met_tool_wrapper/ASCII2NC/ASCII2NC.conf | 2 - .../ASCII2NC/ASCII2NC_python_embedding.conf | 2 - .../EnsembleStat/EnsembleStat.conf | 2 - .../EnsembleStat_python_embedding.conf | 2 - .../met_tool_wrapper/Example/Example.conf | 2 - .../GempakToCF/GempakToCF.conf | 2 - .../GenEnsProd/GenEnsProd.conf | 2 - .../met_tool_wrapper/GenVxMask/GenVxMask.conf | 2 - .../GenVxMask/GenVxMask_multiple.conf | 2 - .../GenVxMask/GenVxMask_with_arguments.conf | 2 - .../met_tool_wrapper/GridDiag/GridDiag.conf | 3 - .../met_tool_wrapper/GridStat/GridStat.conf | 2 - .../GridStat/GridStat_python_embedding.conf | 2 - .../met_tool_wrapper/METdbLoad/METdbLoad.conf | 2 - .../use_cases/met_tool_wrapper/MODE/MODE.conf | 2 - .../MODE/MODE_python_embedding.conf | 2 - parm/use_cases/met_tool_wrapper/MTD/MTD.conf | 2 - .../MTD/MTD_python_embedding.conf | 2 - .../PCPCombine/PCPCombine_add.conf | 2 - .../PCPCombine/PCPCombine_bucket.conf | 2 - .../PCPCombine/PCPCombine_derive.conf | 2 - .../PCPCombine/PCPCombine_loop_custom.conf | 2 - .../PCPCombine_python_embedding.conf | 2 - .../PCPCombine/PCPCombine_subtract.conf | 1 - .../PCPCombine/PCPCombine_sum.conf | 2 - .../PCPCombine/PCPCombine_user_defined.conf | 2 - .../PlotDataPlane/PlotDataPlane_grib1.conf | 2 - .../PlotDataPlane/PlotDataPlane_netcdf.conf | 1 - .../PlotDataPlane_python_embedding.conf | 2 - .../Point2Grid/Point2Grid.conf | 2 - .../PointStat/PointStat_once_per_field.conf | 2 - .../PointStat/PointStat_python_embedding.conf | 2 - .../PyEmbedIngest/PyEmbedIngest.conf | 2 - .../PyEmbedIngest_multi_field_one_file.conf | 2 - .../RegridDataPlane/RegridDataPlane.conf | 2 - ...egridDataPlane_multi_field_multi_file.conf | 2 - .../RegridDataPlane_multi_field_one_file.conf | 2 - 
.../RegridDataPlane_python_embedding.conf | 2 - .../SeriesAnalysis_python_embedding.conf | 2 - .../StatAnalysis_python_embedding.conf | 1 + .../met_tool_wrapper/TCGen/TCGen.conf | 2 - .../TCMPRPlotter/TCMPRPlotter.conf | 2 - .../TCPairs/TCPairs_tropical.conf | 2 - .../met_tool_wrapper/TCRMW/TCRMW.conf | 2 - .../met_tool_wrapper/TCStat/TCStat.conf | 2 - .../UserScript/UserScript_run_once.conf | 2 - .../UserScript_run_once_for_each.conf | 2 - .../UserScript_run_once_per_init.conf | 2 - .../UserScript_run_once_per_lead.conf | 63 ++++---- .../UserScript_run_once_per_valid.conf | 2 - .../EnsembleStat_fcstICAP_obsMODIS_aod.conf | 2 - .../GridStat_fcstCESM_obsGFS_ConusTemp.conf | 2 - ...E_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf | 2 - ...tHAFS_obsPrepBufr_JEDI_IODA_interface.conf | 2 - ...GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf | 2 - ...Stat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf | 2 - ...dStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf | 1 - .../GridStat_fcstRTOFS_obsOSTIA_iceCover.conf | 2 - ...ridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf | 2 - ...ridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf | 1 - .../PlotDataPlane_obsHYCOM_coordTripolar.conf | 2 - ...cript_fcstRTOFS_obsAOML_calcTransport.conf | 2 - ...ridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf | 2 - ...ridStat_fcstGFS_obsGFS_Sfc_MultiField.conf | 2 - ...t_fcstGFS_obsGFS_climoNCEP_MultiField.conf | 2 - ...S_obsGFS_FeatureRelative_SeriesByLead.conf | 2 - ..._obsGDAS_UpperAir_MultiField_PrepBufr.conf | 2 - ...cstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf | 2 - ...S_obsGFS_FeatureRelative_SeriesByInit.conf | 2 - ...S_obsGFS_FeatureRelative_SeriesByLead.conf | 2 - ...esByLead_PyEmbed_Multiple_Diagnostics.conf | 2 - .../UserScript_fcstGEFS_Difficulty_Index.conf | 2 - .../EnsembleStat_fcstWOFS_obsWOFS.conf | 2 - .../GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf | 2 - .../GridStat_fcstGFS_obsCCPA_GRIB.conf | 2 - ...GridStat_fcstHREFmean_obsStgIV_Gempak.conf | 2 - ...GridStat_fcstHREFmean_obsStgIV_NetCDF.conf | 1 - 
.../GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf | 2 - ...HRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf | 2 - .../MTD_fcstHRRR-TLE_obsMRMS.conf | 2 - ...sis_fcstNMME_obsCPC_seasonal_forecast.conf | 2 - ...at_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf | 2 - ..._climoStandardized_MultiStatisticTool.conf | 2 - .../s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf | 2 - ...serScript_obsERA_obsOnly_Stratosphere.conf | 2 - .../UserScript_fcstGFS_obsERA_Blocking.conf | 2 - ...erScript_fcstGFS_obsERA_WeatherRegime.conf | 2 - .../UserScript_obsERA_obsOnly_Blocking.conf | 3 - ...erScript_obsERA_obsOnly_WeatherRegime.conf | 2 - .../UserScript_fcstGFS_obsERA_OMI.conf | 2 - .../UserScript_obsCFSR_obsOnly_MJO_ENSO.conf | 143 +++++++++++------- .../UserScript_obsERA_obsOnly_OMI.conf | 3 - ...serScript_obsERA_obsOnly_PhaseDiagram.conf | 2 - .../UserScript_obsERA_obsOnly_RMM.conf | 2 - ...tat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf | 2 - ...rod_fcstHRRR_fcstOnly_SurrogateSevere.conf | 2 - ...at_fcstFV3_obsGOES_BrightnessTempDmap.conf | 2 - ...stHRRR_obsPracPerfect_SurrogateSevere.conf | 1 - ...RR_obsPracPerfect_SurrogateSevereProb.conf | 1 - ...dbLoad_fcstFV3_obsGoes_BrightnessTemp.conf | 2 - .../MODE_fcstFV3_obsGOES_BrightnessTemp.conf | 2 - ...DE_fcstFV3_obsGOES_BrightnessTempObjs.conf | 1 - .../MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf | 2 - ...rid_obsLSR_ObsOnly_PracticallyPerfect.conf | 2 - ...sk_fcstGloTEC_FcstOnly_solar_altitude.conf | 2 - .../GridStat_fcstGloTEC_obsGloTEC_vx7.conf | 1 - ...ter_fcstGFS_obsGFS_UserScript_ExtraTC.conf | 2 - .../GridStat_fcstHAFS_obsTDR_NetCDF.conf | 2 - .../Plotter_fcstGFS_obsGFS_ExtraTC.conf | 2 - .../Plotter_fcstGFS_obsGFS_RPlotting.conf | 1 - .../TCGen_fcstGFS_obsBDECK_2021season.conf | 2 - .../TCRMW_fcstGFS_fcstOnly_gonzalo.conf | 2 - ...I2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf | 2 - 113 files changed, 120 insertions(+), 300 deletions(-) diff --git a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf 
index 45c6e7531f..eb91432923 100644 --- a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf +++ b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf index f2e61547f7..4e008c316c 100644 --- a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf index 81233349e0..8d4a0dcbdd 100644 --- a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf +++ b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf index 95eea3c846..7b76978faa 100644 --- a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/Example/Example.conf b/parm/use_cases/met_tool_wrapper/Example/Example.conf index 096f7aae1f..f20cee6375 100644 --- a/parm/use_cases/met_tool_wrapper/Example/Example.conf +++ b/parm/use_cases/met_tool_wrapper/Example/Example.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 6H LEAD_SEQ = 3H, 6H, 9H, 12H -LOOP_ORDER = times - EXAMPLE_CUSTOM_LOOP_LIST = ext, nc diff --git 
a/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf b/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf index 4e0591ad83..142bd453e3 100644 --- a/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf +++ b/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=12H LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf b/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf index 997d0d6f92..696abe4008 100644 --- a/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf +++ b/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 24H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf index c601cefde1..9b49581368 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf index 92f08c16a4..4982977858 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf index 3c8009e848..33f9a30b58 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # 
File I/O diff --git a/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf b/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf index f2d3adb84f..c35fe3994a 100644 --- a/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf +++ b/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf @@ -36,9 +36,6 @@ INIT_INCREMENT = 21600 LEAD_SEQ = 141, 144, 147 -LOOP_ORDER = processes - - ### # File I/O # https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#directory-and-filename-template-info diff --git a/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf b/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf index 0ccb837af2..e35f9dd74c 100644 --- a/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf +++ b/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf index 46a2354fae..3915618372 100644 --- a/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf b/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf index 1ddcb184ae..72ad9f01f1 100644 --- a/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf +++ b/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf @@ -34,8 +34,6 @@ VALID_BEG = 2005080712 VALID_END = 2005080800 VALID_INCREMENT = 12H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MODE/MODE.conf b/parm/use_cases/met_tool_wrapper/MODE/MODE.conf index 7d094f1f0c..30ed8a2f16 100644 --- a/parm/use_cases/met_tool_wrapper/MODE/MODE.conf +++ 
b/parm/use_cases/met_tool_wrapper/MODE/MODE.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf b/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf index 84cc1e417a..4bf6f106ad 100644 --- a/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MTD/MTD.conf b/parm/use_cases/met_tool_wrapper/MTD/MTD.conf index 8098d9c82f..54ef200032 100644 --- a/parm/use_cases/met_tool_wrapper/MTD/MTD.conf +++ b/parm/use_cases/met_tool_wrapper/MTD/MTD.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=1M LEAD_SEQ = 6H, 9H, 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf b/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf index bf0d0c4d90..ae250104af 100644 --- a/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=1M LEAD_SEQ = 0, 1, 2 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf index 48126f344b..529e6999ca 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 15M -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf index ded9c6cd02..bd8ca11286 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf +++ 
b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 15H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf index da89543b9c..b2f8d6f637 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf index dcc7aa1269..64d449d8ba 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 24H PCP_COMBINE_CUSTOM_LOOP_LIST = arw-fer-gep1, arw-fer-gep5, arw-sch-gep2, arw-sch-gep6, arw-tom-gep3, arw-tom-gep7 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf index 8aaa2fff3a..d9cd56f96e 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=43200 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf index 63cebfefe2..caf0890409 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf @@ -34,7 +34,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 18H -LOOP_ORDER = times ### # File I/O diff --git 
a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf index bbc63b60e0..bdfa337ebd 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 15M -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf index c0503bcede..a30bc6bd30 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf @@ -34,8 +34,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf index 1fd05a6180..f67abc6065 100644 --- a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf index 38edc72430..b98422004c 100644 --- a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf index 5e4b3f5c00..5245f5a5be 100644 --- 
a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf b/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf index 7d0a9e5a23..50646d631a 100644 --- a/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf +++ b/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 24H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf index 3a1d098f65..054c1ef9c8 100644 --- a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf +++ b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf index e22eae5194..e22d62d01b 100644 --- a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf index 6114202c61..4388fb0574 100644 --- a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf +++ b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # PyEmbedIngest Settings diff --git 
a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf index 5f432ae4a2..4c213f5208 100644 --- a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf +++ b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # PyEmbedIngest Settings diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf index d228cd7b6d..1a9940e237 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf index dff538882f..667fbb550f 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf index f620440605..6dbd0dd9fe 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git 
a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf index 31b2944215..177353c981 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 3 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf b/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf index 097267655a..a685eabee9 100644 --- a/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf @@ -35,8 +35,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = processes - SERIES_ANALYSIS_CUSTOM_LOOP_LIST = diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf index 43ca0cc64e..edc3af4721 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf @@ -24,6 +24,7 @@ PROCESS_LIST = StatAnalysis # LEAD_SEQ is the list of forecast leads to process # https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control ### + LOOP_BY = VALID VALID_TIME_FMT = %Y%m%d VALID_BEG=20070331 diff --git a/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf b/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf index 8d25a7feaa..ace36034eb 100644 --- a/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf +++ b/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf @@ -34,8 +34,6 @@ LEAD_SEQ = TC_GEN_CUSTOM_LOOP_LIST = -LOOP_ORDER = times - ### # File I/O diff 
--git a/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf b/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf index 38a2046a4e..29f10457b7 100644 --- a/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf +++ b/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf @@ -31,8 +31,6 @@ INIT_BEG = 201503 INIT_END = 201503 INIT_INCREMENT = 6H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf b/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf index 680a1515a6..f7b103720a 100644 --- a/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf +++ b/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 #TC_PAIRS_SKIP_LEAD_SEQ = False -LOOP_ORDER = times - TC_PAIRS_RUN_ONCE = False diff --git a/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf b/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf index e92c2ba534..6d341db0a6 100644 --- a/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf +++ b/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf @@ -31,8 +31,6 @@ INIT_BEG = 2016092900 INIT_END = 2016092900 INIT_INCREMENT = 21600 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf b/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf index d791c3ffeb..cb6eb51d8c 100644 --- a/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf +++ b/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf @@ -31,8 +31,6 @@ INIT_BEG = 2019103112 INIT_END = 2019103112 INIT_INCREMENT = 6H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf index 036913a999..704b434f44 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 
12H, 15H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf index d135fa6be2..9a3247f45f 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf index dccb808ed6..3dcb9a86fd 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf index e8b6c95587..560e652f45 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf @@ -1,56 +1,55 @@ -# UserScript wrapper example - [config] -# List of applications to run - only UserScript for this case +# Documentation for this use case can be found at +# https://metplus.readthedocs.io/en/latest/generated/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.html + +# For additional information, please see the METplus Users Guide. 
+# https://metplus.readthedocs.io/en/latest/Users_Guide + +### +# Processes to run +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#process-list +### + PROCESS_LIST = UserScript -# time looping - options are INIT, VALID, RETRO, and REALTIME +### +# Time Info +# LOOP_BY options are INIT, VALID, RETRO, and REALTIME # If set to INIT or RETRO: # INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set # If set to VALID or REALTIME: # VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set -LOOP_BY = INIT +# LEAD_SEQ is the list of forecast leads to process +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control +### -# Format of INIT_BEG and INIT_END using % items -# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc. -# see www.strftime.org for more information -# %Y%m%d%H expands to YYYYMMDDHH -INIT_TIME_FMT = %Y%m%d%H%M%S +USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -# Start time for METplus run - must match VALID_TIME_FMT +LOOP_BY = INIT +INIT_TIME_FMT = %Y%m%d%H%M%S INIT_BEG = 20141031093015 - -# End time for METplus run - must match VALID_TIME_FMT INIT_END = 20141101093015 - -# Increment between METplus runs (in seconds if no units are specified) -# Must be >= 60 seconds INIT_INCREMENT = 12H -# List of forecast leads to process for each run time (init or valid) -# In hours if units are not specified -# If unset, defaults to 0 (don't loop through forecast leads) LEAD_SEQ = 0H, 12H, 24H, 120H -# Order of loops to process data - Options are times, processes -# Not relevant if only one item is in the PROCESS_LIST -# times = run all wrappers in the PROCESS_LIST for a single run time, then -# increment the run time and run all wrappers again until all times have -# been evaluated. 
-# processes = run the first wrapper in the PROCESS_LIST for all times -# specified, then repeat for the next item in the PROCESS_LIST until all -# wrappers have been run -LOOP_ORDER = processes - -# list of strings to loop over for each run time. -# value for each item can be referenced in filename templates with {custom?fmt=%s} USER_SCRIPT_CUSTOM_LOOP_LIST = nc -USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD + +### +# File I/O +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#directory-and-filename-template-info +### USER_SCRIPT_INPUT_TEMPLATE = init_{init?fmt=%Y%m%d%H%M%S}_valid_{valid?fmt=%Y%m%d%H%M%S}_lead_{lead?fmt=%3H}.{custom} USER_SCRIPT_INPUT_DIR = {INPUT_BASE}/met_test/new/test + +### +# UserScript Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#userscript +### + USER_SCRIPT_COMMAND = {PARM_BASE}/use_cases/met_tool_wrapper/UserScript/print_file_list.py diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf index 89092717a4..02a2b295d5 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf b/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf index 213142de06..594a0e1653 100644 --- a/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf +++ b/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=06H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git 
a/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf b/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf index bc6b3cc261..f476382081 100644 --- a/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf +++ b/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 6, 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf b/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf index 4f304f865d..0535ff7da8 100644 --- a/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf +++ b/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 24, 48 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf b/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf index 2cb82bf767..f82ffcdc89 100644 --- a/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf +++ b/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf @@ -34,8 +34,6 @@ VALID_INCREMENT = 12H LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf index 6a0df6836a..410d694207 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf +++ 
b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf @@ -32,8 +32,6 @@ VALID_BEG=20190201 VALID_END=20190201 VALID_INCREMENT=86400 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf index 5679242bd3..e33505f894 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf index 2fbb89a9d0..70ff844d25 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf index 1f80253a32..4868d99bf5 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0 GRID_STAT_CUSTOM_LOOP_LIST = north, south -LOOP_ORDER = times - ### # File I/O diff --git 
a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf index 11804aeb48..f1d13bb482 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf index 8d26d9caed..a89fa70127 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf b/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf index a7f6c51d26..3a9e68c152 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0 PLOT_DATA_PLANE_CUSTOM_LOOP_LIST = north, south -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf b/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf index fea9f36ee9..75cc0c6d9b 100644 --- 
a/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf @@ -31,8 +31,6 @@ VALID_INCREMENT = 24H LEAD_SEQ = -LOOP_ORDER = processes - USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf index 7bce8e2c66..6152afdb63 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf @@ -12,8 +12,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 8d -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf index 20ad10659b..fea4677c2d 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf index 28c513fdd1..8e8eb73c06 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 24, 48 -LOOP_ORDER = times - ### # File I/O diff --git 
a/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf b/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf index 77110adf82..c9b54fc876 100644 --- a/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf +++ b/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 6H LEAD_SEQ = begin_end_incr(0,30,6) -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf index ea774a66e7..4a11246bfa 100644 --- a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf +++ b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf index b722dd49ac..3d2fd7ee64 100644 --- a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf +++ b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf @@ -32,8 +32,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf 
b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf index 65dbe5ed8f..8188d1a56a 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf @@ -35,8 +35,6 @@ SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID SERIES_ANALYSIS_RUN_ONCE_PER_STORM_ID = True -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf index 32a43b9f12..dd0fc48274 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf @@ -37,8 +37,6 @@ LEAD_SEQ_1_LABEL = Day1 LEAD_SEQ_2 = begin_end_incr(24,42,6) LEAD_SEQ_2_LABEL = Day2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf index 4074f8d415..6cf9ba3c70 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf @@ -37,8 +37,6 @@ SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD 
SERIES_ANALYSIS_RUN_ONCE_PER_STORM_ID = False -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf b/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf index 06d8309c22..bd7820e33a 100644 --- a/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf +++ b/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf @@ -37,8 +37,6 @@ USER_SCRIPT_CUSTOM_LOOP_LIST = nc USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_FOR_EACH -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf b/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf index 03904ba0e4..64814fce78 100644 --- a/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf +++ b/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf b/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf index 18a5e568ce..7f1eb54780 100644 --- a/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf +++ b/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 3,6,9,12 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf index ae536ee115..51f74951c2 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf +++ 
b/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf index 2964e6b599..5c9fd77b41 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 18 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf index 9ef9bb8df2..6428e91227 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf @@ -33,7 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 18 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf index 1f88a2e87c..8caa067405 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf @@ -34,8 +34,6 @@ INIT_INCREMENT=60 LEAD_SEQ = 6, 7 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf index e8e4c525fb..0864036901 100644 --- 
a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf +++ b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=86400 LEAD_SEQ = begin_end_incr(12, 0, -1) -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf index 598e1a9a3e..b81242a67c 100644 --- a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf +++ b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 1,2,3 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf b/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf index 52a98f302e..c30b22a23c 100644 --- a/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf +++ b/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 1m, 2m, 3m, 4m, 5m, 6m SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf b/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf index 59b26fb25c..88b0036351 100644 --- a/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf +++ b/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1Y LEAD_SEQ = -LOOP_ORDER = processes - ### # File I/O diff --git 
a/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf b/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf index 7e32b1e8d4..2bbc1b76b3 100644 --- a/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf +++ b/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf @@ -36,8 +36,6 @@ LEAD_SEQ = SERIES_ANALYSIS_CUSTOM_LOOP_LIST = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23 SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf b/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf index 5c02108948..5f0a273add 100644 --- a/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf +++ b/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf @@ -33,8 +33,6 @@ LEAD_SEQ = USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_FOR_EACH -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf b/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf index 86c4e5264a..0d81594060 100644 --- a/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf +++ b/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf @@ -32,8 +32,6 @@ VALID_BEG = 2013 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE -LOOP_ORDER = processes - ### # UserScript Settings diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf index 6bb4c9f07e..4bad3cef08 100644 --- 
a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf @@ -36,8 +36,6 @@ LEAD_SEQ = 0 # Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf index 4d74927724..37b224e454 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf @@ -41,8 +41,6 @@ LEAD_SEQ = 0 # Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:1201,0229" -LOOP_ORDER = processes - ### # RegridDataPlane(regrid_obs) Settings diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf index 0b4a32cca6..526074c95f 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf @@ -40,9 +40,6 @@ LEAD_SEQ = 0 # Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" -LOOP_ORDER = processes - - # Run the obs data # A variable set to be used in the pre-processing steps OBS_RUN = True diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf index 1d0583ea44..47d8367071 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf @@ -41,8 
+41,6 @@ SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -LOOP_ORDER = processes - ### # RegridDataPlane(regrid_obs) Settings diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf index 63fd483b91..38dd0439ba 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf @@ -37,8 +37,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf index 115bb060a3..6bb38ebf60 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf @@ -1,6 +1,16 @@ -# MJO_ENSO UserScript wrapper - [config] + +# Documentation for this use case can be found at +# https://metplus.readthedocs.io/en/latest/generated/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.html + +# For additional information, please see the METplus Users Guide. 
+# https://metplus.readthedocs.io/en/latest/Users_Guide + +### +# Processes to run +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#process-list +### + # All steps, including creating daily means and mean daily annual cycle #PROCESS_LIST = RegridDataPlane(regrid_obs_taux), RegridDataPlane(regrid_obs_tauy), RegridDataPlane(regrid_obs_sst), RegridDataPlane(regrid_obs_ucur), RegridDataPlane(regrid_obs_vcur), UserScript(script_mjo_enso) # Computing regridding, and MJO ENSO Analysis script @@ -8,50 +18,38 @@ PROCESS_LIST = UserScript(script_mjo_enso) -# time looping - options are INIT, VALID, RETRO, and REALTIME + +### +# Time Info +# LOOP_BY options are INIT, VALID, RETRO, and REALTIME # If set to INIT or RETRO: # INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set # If set to VALID or REALTIME: # VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set -LOOP_BY = VALID - -# Format of VALID_BEG and VALID_END using % items -# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc. -# see www.strftime.org for more information -# %Y%m%d%H expands to YYYYMMDDHH -VALID_TIME_FMT = %Y%m%d +# LEAD_SEQ is the list of forecast leads to process +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control +### -# Start time for METplus run -VALID_BEG = 19900101 -# End time for METplus run +LOOP_BY = VALID +VALID_TIME_FMT = %Y%m%d +VALID_BEG = 19900101 VALID_END = 20211231 - -# Increment between METplus runs in seconds. 
Must be >= 60 VALID_INCREMENT = 86400 -# List of forecast leads to process for each run time (init or valid) -# In hours if units are not specified -# If unset, defaults to 0 (don't loop through forecast leads) LEAD_SEQ = 0 -# Order of loops to process data - Options are times, processes -# Not relevant if only one item is in the PROCESS_LIST -# times = run all wrappers in the PROCESS_LIST for a single run time, then -# increment the run time and run all wrappers again until all times have -# been evaluated. -# processes = run the first wrapper in the PROCESS_LIST for all times -# specified, then repeat for the next item in the PROCESS_LIST until all -# wrappers have been run -LOOP_ORDER = processes - -# location of configuration files used by MET applications -CONFIG_DIR={PARM_BASE}/use_cases/model_applications/s2s_mjo # Run the obs for these cases OBS_RUN = True FCST_RUN = False + +### +# RegridDataPlane Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Mask to use for regridding REGRID_DATA_PLANE_VERIF_GRID = latlon 156 61 -30 125 1 1 @@ -62,6 +60,11 @@ REGRID_DATA_PLANE_METHOD = NEAREST REGRID_DATA_PLANE_WIDTH = 1 +### +# RegridDataPlane(regrid_obs_taux) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid OLR to -15 to 15 latitude [regrid_obs_taux] # Run regrid_data_plane on forecast data @@ -87,6 +90,11 @@ OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.n OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE =cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc +### +# RegridDataPlane(regrid_obs_tauy) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid meridional wind stress [regrid_obs_tauy] # Run regrid_data_plane on forecast data @@ -111,6 +119,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = 
{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_sst) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid sst [regrid_obs_sst] # Run regrid_data_plane on forecast data @@ -133,6 +147,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_sst_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_sst_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_ucur) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid zonal ocean current [regrid_obs_ucur] # Run regrid_data_plane on forecast data @@ -155,6 +175,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_vcur) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid meridional ocean current [regrid_obs_vcur] # Run regrid_data_plane on forecast data @@ -179,6 +205,32 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +### +# UserScript(script_mjo_enso) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#userscript +### + +# Configurations for UserScript: Run the 
MJO_ENSO Analysis driver +[script_mjo_enso] +# list of strings to loop over for each run time. +# Run the user script once per lead +USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD + +# Template of filenames to input to the user-script +#USER_SCRIPT_INPUT_TEMPLATE = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +USER_SCRIPT_INPUT_TEMPLATE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +# Name of the file containing the listing of input files +# The options are OBS_TAUX_INPUT, OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT, FCST_TAUX_INPUT, FCST_TAUY_INPUT, FCST_SST_INPUT, FCST_UCUR_INPUT,and FCST_VCUR_INPUT +# *** Make sure the order is the same as the order of templates listed in USER_SCRIPT_INPUT_TEMPLATE +USER_SCRIPT_INPUT_TEMPLATE_LABELS = 
OBS_TAUX_INPUT,OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT + +# Command to run the user script with input configuration file +USER_SCRIPT_COMMAND = {METPLUS_BASE}/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/mjo_enso_driver.py + + # Configurations for the MJO-ENSO analysis script [user_env_vars] # Whether to Run the model or obs @@ -192,18 +244,18 @@ SCRIPT_OUTPUT_BASE = {OUTPUT_BASE} OBS_PER_DAY = 1 # Variable names for TAUX, TAUY, SST, UCUR, VCUR -OBS_TAUX_VAR_NAME = uflx -OBS_TAUY_VAR_NAME = vflx -OBS_SST_VAR_NAME = sst +OBS_TAUX_VAR_NAME = uflx +OBS_TAUY_VAR_NAME = vflx +OBS_SST_VAR_NAME = sst OBS_UCUR_VAR_NAME = u OBS_VCUR_VAR_NAME = v # EOF Filename -TAUX_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_uflx_eof.nc +TAUX_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_uflx_eof.nc TAUY_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_vflx_eof.nc WMJOK_SST_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_multivarEOF.nc -# Filters weights +# Filters weights TAUX_Filter1_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/taux.filter1.txt TAUX_Filter2_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/taux.filter2.txt TAUY_Filter1_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/tauy.filter1.txt @@ -222,24 +274,5 @@ MAKE_MAKI_OUTPUT_TEXT_FILE = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJ PLOT_TIME_BEG = 19900101 PLOT_TIME_END = 20211231 PLOT_TIME_FMT = {VALID_TIME_FMT} -OBS_PLOT_OUTPUT_NAME = MAKE_MAKI_timeseries +OBS_PLOT_OUTPUT_NAME = MAKE_MAKI_timeseries OBS_PLOT_OUTPUT_FORMAT = png - -# Configurations for UserScript: Run the MJO_ENSO Analysis driver -[script_mjo_enso] -# 
list of strings to loop over for each run time. -# Run the user script once per lead -USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD - -# Template of filenames to input to the user-script -#USER_SCRIPT_INPUT_TEMPLATE = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc - -USER_SCRIPT_INPUT_TEMPLATE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc - -# Name of the file containing the listing of input files -# The options are OBS_TAUX_INPUT, OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT, FCST_TAUX_INPUT, FCST_TAUY_INPUT, FCST_SST_INPUT, FCST_UCUR_INPUT,and FCST_VCUR_INPUT -# *** Make sure the order is the same as the order of templates listed in USER_SCRIPT_INPUT_TEMPLATE -USER_SCRIPT_INPUT_TEMPLATE_LABELS = OBS_TAUX_INPUT,OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, 
OBS_VCUR_INPUT - -# Command to run the user script with input configuration file -USER_SCRIPT_COMMAND = {METPLUS_BASE}/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/mjo_enso_driver.py diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf index 8f7a87db21..c2fe8ab7c5 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf @@ -16,9 +16,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf index ed50889f35..015b5f195b 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf index add8fae514..352a49f39d 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf @@ -37,8 +37,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf 
b/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf index 02e6da04eb..73be8cda91 100644 --- a/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf +++ b/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 0,1,2 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf b/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf index e63c21a0e6..df6ead3e55 100644 --- a/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf +++ b/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=86400 LEAD_SEQ = 36 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf index 2deafe1408..1452ca4cde 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf index 0301941e0f..f5957064ba 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf @@ -35,7 +35,6 @@ 
INIT_SEQ = 0 LEAD_SEQ_MIN = 36 LEAD_SEQ_MAX = 36 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf index 24c3be3533..822e28e8ce 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf @@ -35,7 +35,6 @@ INIT_SEQ = 0 LEAD_SEQ_MIN = 36 LEAD_SEQ_MAX = 36 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf b/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf index 79ef5d57fb..3ac50c4d4b 100644 --- a/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf +++ b/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf @@ -32,8 +32,6 @@ VALID_BEG = 2019052112 VALID_END = 2019052100 VALID_INCREMENT = 12H -LOOP_ORDER = processes - MET_DB_LOAD_RUNTIME_FREQ = RUN_ONCE diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf index d85396cee9..b658849cf2 100644 --- a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf index ff97185d2a..9a8d8b5cbe 100644 --- 
a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf @@ -33,7 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf b/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf index 16f73d3674..2983b999cb 100644 --- a/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf @@ -35,8 +35,6 @@ INIT_SEQ = 0 LEAD_SEQ_MAX = 36 LEAD_SEQ_MIN = 12 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf b/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf index 5255814797..5f24ac8034 100644 --- a/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf +++ b/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 24H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf b/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf index a2f6d992ba..d6a4a7df15 100644 --- a/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf +++ b/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf @@ -36,8 +36,6 @@ VALID_INCREMENT = 600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf 
b/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf index 04e2575d10..dd6494eb83 100644 --- a/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf +++ b/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf @@ -36,7 +36,6 @@ VALID_INCREMENT = 600 LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf b/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf index ef1344706e..1e3b3fc459 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf b/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf index 38b229e0f3..6c28e387ad 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0,6,12,18 CUSTOM_LOOP_LIST = 190829H1 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf index 60bf71574c..e45b93aa0c 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 TC_PAIRS_RUN_ONCE = 
True -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf index dbd4e39267..8548153705 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf @@ -31,7 +31,6 @@ INIT_BEG = 20141214 INIT_END = 20141214 INIT_INCREMENT = 21600 ;; set to every 6 hours=21600 seconds -LOOP_ORDER = processes ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf b/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf index 3ea343ac81..50aee90b90 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf @@ -29,8 +29,6 @@ LOOP_BY = INIT INIT_TIME_FMT = %Y INIT_BEG = 2021 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf b/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf index f44ee2d2c0..4782ccae8b 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf @@ -34,8 +34,6 @@ INIT_INCREMENT = 6H LEAD_SEQ = begin_end_incr(0, 24, 6) #LEAD_SEQ = begin_end_incr(0, 126, 6) -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf b/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf index 409c3422ca..5cc81d15d1 100644 --- 
a/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0,6,12,18 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID -LOOP_ORDER = processes - ### # File I/O From e978a518448b84a1953be63465454151344b3019 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 15:16:45 -0600 Subject: [PATCH 65/92] Per #1687, removed LOOP_ORDER from default config --- parm/metplus_config/defaults.conf | 3 --- 1 file changed, 3 deletions(-) diff --git a/parm/metplus_config/defaults.conf b/parm/metplus_config/defaults.conf index 36c359d6d8..c1e340545a 100644 --- a/parm/metplus_config/defaults.conf +++ b/parm/metplus_config/defaults.conf @@ -67,9 +67,6 @@ GFDL_TRACKER_EXEC = /path/to/standalone_gfdl-vortextracker_v3.9a/trk_exec # Set to False to preserve these files # ############################################################################### - -LOOP_ORDER = processes - PROCESS_LIST = Usage OMP_NUM_THREADS = 1 From c5061abe2255c8d42dcb696d9178211b677406ba Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 14 Oct 2022 15:21:24 -0600 Subject: [PATCH 66/92] added missing rst characters --- docs/Users_Guide/wrappers.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 596679357b..5f5c65b0a4 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6925,9 +6925,9 @@ setting :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` and optionally Output files specified with the -dump_row or -out_stat arguments must be defined in a job using :term:`STAT_ANALYSIS_JOB\`. 
The [dump_row_file] keyword can be added to a job after the -dump_row argument -only if a :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE is set. Similarly, +only if a :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` is set. Similarly, the [out_stat_file] keyword can be added to a job after the -out_stat argument -only if a :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE is set. +only if a :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` is set. METplus Configuration From 39e2c41cbf0900cfb1894ff8cf05c0b2fd2b669e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 17 Oct 2022 11:44:26 -0600 Subject: [PATCH 67/92] moved utilities to parse time lists from strings to time_util, cleaned up logic to set command line arguments to simplify how command is built, added documentation to functions and cleaned up code --- .../pytests/util/time_util/test_time_util.py | 19 +++ .../stat_analysis/test_stat_analysis.py | 22 +-- metplus/util/time_util.py | 69 ++++++++ metplus/wrappers/stat_analysis_wrapper.py | 159 ++++++++---------- 4 files changed, 160 insertions(+), 109 deletions(-) diff --git a/internal/tests/pytests/util/time_util/test_time_util.py b/internal/tests/pytests/util/time_util/test_time_util.py index 86da6b3140..6d133bd67e 100644 --- a/internal/tests/pytests/util/time_util/test_time_util.py +++ b/internal/tests/pytests/util/time_util/test_time_util.py @@ -8,6 +8,25 @@ from metplus.util import time_util +@pytest.mark.parametrize( + 'input_str, expected_output', [ + ('', []), + ('0,1,2,3', ['000000', '010000', '020000', '030000']), + ('12, 24', ['120000', '240000']), + ('196', ['1960000']), + ('12H, 24H', ['120000', '240000']), + ('45M', ['004500']), + ('42S', ['000042']), + ('24, 48, 72, 96, 120, 144, 168, 192, 216, 240', + ['240000', '480000', '720000', '960000', '1200000', + '1440000', '1680000', '1920000', '2160000', '2400000']), + ] +) +@pytest.mark.wrapper_d +def test_get_met_time_list(input_str, expected_output): + assert 
time_util.get_met_time_list(input_str) == expected_output + + @pytest.mark.parametrize( 'rd, seconds, time_string, time_letter_only, hours', [ (relativedelta(seconds=1), 1, '1 second', '1S', 0), diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index fe11d3b22f..b408d3269b 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -231,26 +231,6 @@ def test_format_conf_list(metplus_config, list_name, config_overrides, assert wrapper._format_conf_list(list_name) == expected_value -@pytest.mark.parametrize( - 'input_str, expected_output', [ - ('', []), - ('0,1,2,3', ['000000', '010000', '020000', '030000']), - ('12, 24', ['120000', '240000']), - ('196', ['1960000']), - ('12H, 24H', ['120000', '240000']), - ('45M', ['004500']), - ('42S', ['000042']), - ('24, 48, 72, 96, 120, 144, 168, 192, 216, 240', - ['240000', '480000', '720000', '960000', '1200000', - '1440000', '1680000', '1920000', '2160000', '2400000']), - ] -) -@pytest.mark.wrapper_d -def test_get_met_time_list(metplus_config, input_str, expected_output): - wrapper = stat_analysis_wrapper(metplus_config) - assert wrapper._get_met_time_list(input_str) == expected_output - - @pytest.mark.wrapper_d def test_get_command(metplus_config): # Independently test that the stat_analysis command @@ -266,7 +246,7 @@ def test_get_command(metplus_config): +'-config /path/to/STATAnalysisConfig' ) st.c_dict['LOOKIN_DIR'] = '/path/to/lookin_dir' - st.c_dict['CONFIG_FILE'] = '/path/to/STATAnalysisConfig' + st.args.append('-config /path/to/STATAnalysisConfig') test_command = st.get_command() assert expected_command == test_command diff --git a/metplus/util/time_util.py b/metplus/util/time_util.py index a28fd63d58..e1bd4b1f93 100755 --- a/metplus/util/time_util.py +++ b/metplus/util/time_util.py @@ -32,6 +32,7 @@ 'S': 'second', } + 
def get_relativedelta(value, default_unit='S'): """!Converts time values ending in Y, m, d, H, M, or S to relativedelta object Args: @@ -80,6 +81,7 @@ def get_relativedelta(value, default_unit='S'): # unsupported time unit specified, return None return None + def get_seconds_from_string(value, default_unit='S', valid_time=None): """!Convert string of time (optionally ending with time letter, i.e. HMSyMD to seconds Args: @@ -89,12 +91,14 @@ def get_seconds_from_string(value, default_unit='S', valid_time=None): rd_obj = get_relativedelta(value, default_unit) return ti_get_seconds_from_relativedelta(rd_obj, valid_time) + def time_string_to_met_time(time_string, default_unit='S', force_hms=False): """!Convert time string (3H, 4M, 7, etc.) to format expected by the MET tools ([H]HH[MM[SS]])""" total_seconds = get_seconds_from_string(time_string, default_unit) return seconds_to_met_time(total_seconds, force_hms=force_hms) + def seconds_to_met_time(total_seconds, force_hms=False): seconds_time_string = str(total_seconds % 60).zfill(2) minutes_time_string = str(total_seconds // 60 % 60).zfill(2) @@ -109,6 +113,7 @@ def seconds_to_met_time(total_seconds, force_hms=False): else: return hour_time_string + def ti_get_hours_from_relativedelta(lead, valid_time=None): """! Get hours from relativedelta. Simply calls get seconds function and divides the result by 3600. @@ -129,6 +134,7 @@ def ti_get_hours_from_relativedelta(lead, valid_time=None): return lead_seconds // 3600 + def ti_get_seconds_from_relativedelta(lead, valid_time=None): """!Check relativedelta object contents and compute the total number of seconds in the time. 
Return None if years or months are set, because the exact number @@ -161,6 +167,7 @@ def ti_get_seconds_from_relativedelta(lead, valid_time=None): return total_seconds + def ti_get_seconds_from_lead(lead, valid='*'): if isinstance(lead, int): return lead @@ -172,6 +179,7 @@ def ti_get_seconds_from_lead(lead, valid='*'): return ti_get_seconds_from_relativedelta(lead, valid_time) + def ti_get_hours_from_lead(lead, valid='*'): lead_seconds = ti_get_seconds_from_lead(lead, valid) if lead_seconds is None: @@ -179,12 +187,14 @@ def ti_get_hours_from_lead(lead, valid='*'): return lead_seconds // 3600 + def get_time_suffix(letter, letter_only): if letter_only: return letter return f" {TIME_LETTER_TO_STRING[letter]} " + def format_time_string(lead, letter, plural, letter_only): if letter == 'Y': value = lead.years @@ -212,6 +222,7 @@ def format_time_string(lead, letter, plural, letter_only): return output + def ti_get_lead_string(lead, plural=True, letter_only=False): """!Check relativedelta object contents and create string representation of the highest unit available (year, then, month, day, hour, minute, second). @@ -250,6 +261,64 @@ def ti_get_lead_string(lead, plural=True, letter_only=False): return f"{negative}{output}" + +def get_met_time_list(string_value, sort_list=True): + """! Convert a string into a list of strings in MET time format HHMMSS. + + @param string_value input string to parse + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. + @returns list of strings with MET times + """ + return _format_time_list(string_value, get_met_format=True, + sort_list=sort_list) + + +def get_delta_list(string_value, sort_list=True): + """! Convert a string into a list of relativedelta objects. + + @param string_value input string to parse + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. 
+ @returns list of relativedelta objects + """ + return _format_time_list(string_value, get_met_format=False, + sort_list=sort_list) + + +def _format_time_list(string_value, get_met_format, sort_list=True): + """! Helper function to convert a string into a list of times. + + @param string_value input string to parse + @param get_met_format If True, format the items in MET time format HHMMSS. + If False, format each item as a relativedelta object + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. + @returns list of either strings with MET times or relativedelta objects + """ + out_list = [] + if not string_value: + return [] + + for time_string in string_value.split(','): + time_string = time_string.strip() + if get_met_format: + value = time_string_to_met_time(time_string, default_unit='H', + force_hms=True) + out_list.append(value) + else: + delta_obj = get_relativedelta(time_string, default_unit='H') + out_list.append(delta_obj) + + if sort_list: + if get_met_format: + out_list.sort(key=int) + else: + out_list.sort(key=ti_get_seconds_from_relativedelta) + + return out_list + + def ti_calculate(input_dict_preserve): out_dict = {} input_dict = input_dict_preserve.copy() diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index c33342d82b..2b8074e15b 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1,14 +1,8 @@ -''' +"""! 
Program Name: stat_analysis_wrapper.py Contact(s): Mallory Row, George McCabe -Abstract: Runs stat_analysis -History Log: Fourth version -Usage: stat_analysis_wrapper.py -Parameters: None -Input Files: MET STAT files -Output Files: MET STAT files -Condition codes: 0 for success, 1 for failure -''' +Abstract: Builds commands to run stat_analysis +""" import os import glob @@ -21,11 +15,12 @@ from ..util import do_string_sub, find_indices_in_config_section from ..util import parse_var_list, remove_quotes, list_to_str from ..util import get_start_and_end_times -from ..util import time_string_to_met_time, get_relativedelta from ..util import ti_get_seconds_from_relativedelta +from ..util import get_met_time_list, get_delta_list from ..util import YMD, YMD_HMS from . import CommandBuilder + class StatAnalysisWrapper(CommandBuilder): """! Wrapper to the MET tool stat_analysis which is used to filter and summarize data from MET's point_stat, grid_stat, @@ -128,22 +123,15 @@ def __init__(self, config, instance=None): super().__init__(config, instance=instance) def get_command(self): + """! Build command to run. It is assumed that any errors preventing a + successfully run will have preventing this function from being called. - cmd = f"{self.app_path} -v {self.c_dict['VERBOSITY']}" - if self.args: - cmd += ' ' + ' '.join(self.args) - - cmd += ' -lookin ' + self.c_dict['LOOKIN_DIR'] - - if self.c_dict.get('CONFIG_FILE'): - cmd += f" -config {self.c_dict['CONFIG_FILE']}" - else: - cmd += f' {self.c_dict["JOB_ARGS"]}' - - if self.c_dict.get('OUTPUT_FILENAME'): - cmd += f" -out {self.c_dict['OUTPUT_FILENAME']}" + @returns string with command to run + """ + return (f"{self.app_path} -v {self.c_dict['VERBOSITY']}" + f" -lookin {self.c_dict['LOOKIN_DIR']}" + f" {' '.join(self.args)}").rstrip() - return cmd def create_c_dict(self): """! 
Create a data structure (dictionary) that contains all the @@ -248,7 +236,12 @@ def _read_jobs_from_config(self): return jobs def c_dict_error_check(self, c_dict, all_field_lists_empty): + """! Check values read into c_dict from METplusConfig and report errors + if anything is misconfigured. + @param c_dict dictionary containing config values to check + @param all_field_lists_empty True if no field lists were parsed + """ if not c_dict.get('CONFIG_FILE'): if len(c_dict['JOBS']) > 1: self.log_error( @@ -259,7 +252,7 @@ def c_dict_error_check(self, c_dict, all_field_lists_empty): self.logger.info("STAT_ANALYSIS_CONFIG_FILE not set. Passing " "job arguments to stat_analysis directly on " "the command line. This will bypass " - "any filtering done unless you add the " + "any filtering done unless you add the " "arguments to STAT_ANALYSIS_JOBS") if not c_dict['OUTPUT_DIR']: @@ -282,9 +275,6 @@ def c_dict_error_check(self, c_dict, all_field_lists_empty): conf = f"STAT_ANALYSIS_{conf}_TEMPLATE" self.log_error(f'Must set {conf} if [{check}] is used' ' in a job') - # error if they are found but their templates are not set - - for conf_list in self.LIST_CATEGORIES: if not c_dict[conf_list]: @@ -338,7 +328,7 @@ def _get_level_list(self, data_type): Format list items to match the format expected by StatAnalysis by removing parenthesis and any quotes, then adding back single quotes - Args: + @param data_type type of list to get, FCST or OBS @returns list containing the formatted level list """ @@ -356,11 +346,20 @@ def _get_level_list(self, data_type): return [f'"{item}"' for item in level_list] def _format_conf_list(self, conf_list): + """! Process config list. If list name (e.g. FCST_LEAD_LIST) is not + set, then check if numbered config variable (e.g. FCST_LEAD_LIST) + is set. Format thresholds lists as thresholds. Add quotation marks + around any list not found in the self.FORMAT_LISTS. Format lists will + be formatted later based on the loop/group conditions. 
+ + @param conf_list name of METplus config variable to process + @returns list of items parsed from configuration + """ items = getlist( self.config.getraw('config', conf_list, '') ) - # if list if empty or unset, check for {LIST_NAME} + # if list is empty or unset, check for {LIST_NAME} if not items: indices = list( find_indices_in_config_section(fr'{conf_list}(\d+)$', @@ -397,41 +396,6 @@ def _format_conf_list(self, conf_list): return formatted_items - @staticmethod - def _format_time_list(string_value, get_met_format, sort_list=True): - out_list = [] - if not string_value: - return [] - for time_string in string_value.split(','): - time_string = time_string.strip() - if get_met_format: - value = time_string_to_met_time(time_string, default_unit='H', - force_hms=True) - out_list.append(value) - else: - delta_obj = get_relativedelta(time_string, default_unit='H') - out_list.append(delta_obj) - - if sort_list: - if get_met_format: - out_list.sort(key=int) - else: - out_list.sort(key=ti_get_seconds_from_relativedelta) - - return out_list - - @staticmethod - def _get_met_time_list(string_value, sort_list=True): - return StatAnalysisWrapper._format_time_list(string_value, - get_met_format=True, - sort_list=sort_list) - - @staticmethod - def _get_delta_list(string_value, sort_list=True): - return StatAnalysisWrapper._format_time_list(string_value, - get_met_format=False, - sort_list=sort_list) - def set_lists_loop_or_group(self, c_dict): """! Determine whether the lists from the METplus config file should treat the items in that list as a group or items @@ -539,10 +503,9 @@ def build_stringsub_dict(self, config_dict): ) if 'HOUR' in list_name: - delta_list = self._get_delta_list(config_dict[list_name]) + delta_list = get_delta_list(config_dict[list_name]) if not delta_list: stringsub_dict[sub_name] = list_name_value - # TODO: should this be set to 0:0:0 to 23:59:59? 
stringsub_dict[sub_name + '_beg'] = relativedelta() stringsub_dict[sub_name + '_end'] = ( relativedelta(hours=+23, minutes=+59, seconds=+59) @@ -552,7 +515,7 @@ def build_stringsub_dict(self, config_dict): stringsub_dict[sub_name] = delta_list[0] else: stringsub_dict[sub_name] = ( - '_'.join(self._get_met_time_list(config_dict[list_name])) + '_'.join(get_met_time_list(config_dict[list_name])) ) stringsub_dict[sub_name + '_beg'] = delta_list[0] @@ -580,7 +543,7 @@ def build_stringsub_dict(self, config_dict): ) elif 'LEAD' in list_name: - lead_list = self._get_met_time_list(config_dict[list_name]) + lead_list = get_met_time_list(config_dict[list_name]) if not lead_list: continue @@ -593,7 +556,7 @@ def build_stringsub_dict(self, config_dict): stringsub_dict[sub_name] = lead_list[0] - lead_rd = self._get_delta_list(config_dict[list_name])[0] + lead_rd = get_delta_list(config_dict[list_name])[0] total_sec = ti_get_seconds_from_relativedelta(lead_rd) stringsub_dict[sub_name+'_totalsec'] = str(total_sec) @@ -659,12 +622,12 @@ def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str): @param obs_hour_str string with list of observation hours to process """ if fcst_hour_str: - fcst_hour_list = self._get_delta_list(fcst_hour_str) + fcst_hour_list = get_delta_list(fcst_hour_str) else: fcst_hour_list = None if obs_hour_str: - obs_hour_list = self._get_delta_list(obs_hour_str) + obs_hour_list = get_delta_list(obs_hour_str) else: obs_hour_list = None @@ -759,12 +722,12 @@ def _set_strinsub_other(self, sub_dict, date_type, fcst_lead_str, @param obs_lead_str string to parse list of observation leads """ if fcst_lead_str: - fcst_lead_list = self._get_delta_list(fcst_lead_str) + fcst_lead_list = get_delta_list(fcst_lead_str) else: fcst_lead_list = None if obs_lead_str: - obs_lead_list = self._get_delta_list(obs_lead_str) + obs_lead_list = get_delta_list(obs_lead_str) else: obs_lead_list = None @@ -857,7 +820,7 @@ def get_lookin_dir(self, dir_path, config_dict): 
self.logger.debug(f"Expanding wildcard path: {one_path}") expand_path = glob.glob(one_path.strip()) if not expand_path: - self.logger.warning(f"Wildcard expansion found no matches") + self.logger.warning("Wildcard expansion found no matches") continue all_paths.extend(sorted(expand_path)) @@ -873,7 +836,7 @@ def format_valid_init(self, config_dict): """ for list_name in self.FORMAT_LISTS: list_name = list_name.replace('_LIST', '') - values = self._get_met_time_list(config_dict.get(list_name, '')) + values = get_met_time_list(config_dict.get(list_name, '')) values = [f'"{item}"' for item in values] config_dict[list_name] = ', '.join(values) @@ -992,7 +955,18 @@ def parse_model_info(self): def process_job_args(self, job_type, job, model_info, runtime_settings_dict): - + """! Get dump_row or out_stat file paths and replace [dump_row_file] + and [out_stat_file] keywords from job arguments with the paths. + + @param job_type type of job, either dump_row or out_stat + @param job string of job arguments to replace keywords + @param model_info dictionary containing info for each model processed. + Used to get filename template to use for substitution + @param runtime_settings_dict dictionary containing information for the + run that is being processed. Used to substitute values. + @returns job string with values substituted for [dump_row_file] or + [out_stat_file] + """ output_template = ( model_info[f'{job_type}_filename_template'] ) @@ -1015,6 +989,10 @@ def process_job_args(self, job_type, job, model_info, return job def get_all_runtime_settings(self): + """! Get all settings for each run of stat_analysis. 
+ + @returns list of dictionaries containing settings for each run + """ runtime_settings_dict_list = [] c_dict_list = self.get_c_dict_list() for c_dict in c_dict_list: @@ -1234,6 +1212,8 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): model_list = [] obtype_list = [] dump_row_filename_list = [] + model_info = None + # get list of models to process models_to_run = runtime_settings_dict['MODEL'].split(',') for model_info in self.c_dict['MODEL_INFO_LIST']: @@ -1324,6 +1304,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): containing information needed to run a StatAnalysis job """ for runtime_settings in runtime_settings_dict_list: + self.clear() if not self.create_output_directories(runtime_settings): continue @@ -1380,29 +1361,31 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): value = (f"{mp_item.lower()} = \"{value}\";") self.env_var_dict[f'METPLUS_{mp_item}'] = value - value = f'jobs = ["' - value += '","'.join(runtime_settings['JOBS']) - value += '"];' - self.env_var_dict[f'METPLUS_JOBS'] = value + value = '","'.join(runtime_settings['JOBS']) + value = f'jobs = ["{value}"];' + self.env_var_dict['METPLUS_JOBS'] = value # send environment variables to logger self.set_environment_variables() - # set lookin dir + # set lookin dir to add to command self.logger.debug("Setting -lookin dir to " f"{runtime_settings['LOOKIN_DIR']}") self.c_dict['LOOKIN_DIR'] = runtime_settings['LOOKIN_DIR'] - self.c_dict['JOB_ARGS'] = runtime_settings['JOBS'][0] + + # set any command line arguments + if self.c_dict.get('CONFIG_FILE'): + self.args.append(f"-config {self.c_dict['CONFIG_FILE']}") + else: + self.args.append(runtime_settings['JOBS'][0]) # set -out file path if requested, value will be set to None if not - self.c_dict['OUTPUT_FILENAME'] = ( - runtime_settings.get('OUTPUT_FILENAME') - ) + output_filename = runtime_settings.get('OUTPUT_FILENAME') + if output_filename: + self.args.append(f"-out {output_filename}") self.build() - 
self.clear() - def create_output_directories(self, runtime_settings_dict): """! Check if output filename is set for dump_row or out_stat. If set, Check if the file already exists and if it should be skipped. From 1ac86450100867e291649c2e255cb48e76013b97 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 17 Oct 2022 16:05:27 -0600 Subject: [PATCH 68/92] moved functions to handle thresholds to string_manip utility --- .../pytests/util/met_util/test_met_util.py | 15 ------- .../string_manip/test_util_string_manip.py | 34 ++++++++++++++++ .../stat_analysis/test_stat_analysis.py | 23 ----------- metplus/util/met_util.py | 11 ------ metplus/util/string_manip.py | 39 +++++++++++++++++++ metplus/util/time_looping.py | 6 +-- metplus/wrappers/stat_analysis_wrapper.py | 32 ++------------- 7 files changed, 80 insertions(+), 80 deletions(-) diff --git a/internal/tests/pytests/util/met_util/test_met_util.py b/internal/tests/pytests/util/met_util/test_met_util.py index 797f313e87..481d4f9d46 100644 --- a/internal/tests/pytests/util/met_util/test_met_util.py +++ b/internal/tests/pytests/util/met_util/test_met_util.py @@ -330,21 +330,6 @@ def test_round_0p5(value, expected_result): assert util.round_0p5(value) == expected_result -@pytest.mark.parametrize( - 'expression, expected_result', [ - ('gt3', 'gt3'), - ('>3', 'gt3'), - ('le3.5', 'le3.5'), - ('<=3.5', 'le3.5'), - ('==4', 'eq4'), - ('!=3.5', 'ne3.5'), - ] -) -@pytest.mark.util -def test_comparison_to_letter_format(expression, expected_result): - assert util.comparison_to_letter_format(expression) == expected_result - - @pytest.mark.parametrize( 'skip_times_conf, expected_dict', [ ('"%d:30,31"', {'%d': ['30','31']}), diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index 7a1f2f7992..fc78f5d466 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ 
b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -168,3 +168,37 @@ def test_list_to_str(input, add_quotes, expected_output): assert list_to_str(input) == expected_output else: assert list_to_str(input, add_quotes=add_quotes) == expected_output + + +@pytest.mark.parametrize( + 'expression, expected_result', [ + ('gt3', 'gt3'), + ('>3', 'gt3'), + ('le3.5', 'le3.5'), + ('<=3.5', 'le3.5'), + ('==4', 'eq4'), + ('!=3.5', 'ne3.5'), + ] +) +@pytest.mark.util +def test_comparison_to_letter_format(expression, expected_result): + assert comparison_to_letter_format(expression) == expected_result + + +@pytest.mark.parametrize( + 'expression, expected_result', [ + ('>1', 'gt1'), + ('>=0.2', 'ge0.2'), + ('<30', 'lt30'), + ('<=0.04', 'le0.04'), + ('==5', 'eq5'), + ('!=0.06', 'ne0.06'), + ('>0.05, gt0.05, >=1, ge1, <5, lt5, <=10, le10, ==15, eq15, !=20, ne20', + 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), + ('<805, <1609, <4828, <8045, >=8045, <16090', + 'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), + ] +) +@pytest.mark.util +def test_format_thresh(expression, expected_result): + assert format_thresh(expression) == expected_result diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index b408d3269b..1a13060013 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -348,29 +348,6 @@ def test_set_lists_as_loop_or_group(metplus_config): for elem in test_lists_to_loop_items)) -@pytest.mark.parametrize( - 'expression, expected_result', [ - ('>1', 'gt1'), - ('>=0.2', 'ge0.2'), - ('<30', 'lt30'), - ('<=0.04', 'le0.04'), - ('==5', 'eq5'), - ('!=0.06', 'ne0.06'), - ('>0.05, gt0.05, >=1, ge1, <5, lt5, <=10, le10, ==15, eq15, !=20, ne20', - 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), - ('<805, <1609, <4828, <8045, >=8045, <16090', - 
'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), - ] -) -@pytest.mark.wrapper_d -def test_format_thresh(metplus_config, expression, expected_result): - # Independently test the creation of - # string values for defining thresholds - st = stat_analysis_wrapper(metplus_config) - - assert st.format_thresh(expression) == expected_result - - @pytest.mark.parametrize( 'lists_to_loop,c_dict_overrides,config_dict_overrides,expected_values', [ # Test 0 diff --git a/metplus/util/met_util.py b/metplus/util/met_util.py index 32eecab662..d9fb9b6c5c 100644 --- a/metplus/util/met_util.py +++ b/metplus/util/met_util.py @@ -895,17 +895,6 @@ def get_threshold_via_regex(thresh_string): return comparison_number_list -def comparison_to_letter_format(expression): - """! Convert comparison operator to the letter version if it is not already - @args expression string starting with comparison operator to - convert, i.e. gt3 or <=5.4 - @returns letter comparison operator, i.e. gt3 or le5.4 or None if invalid - """ - for symbol_comp, letter_comp in VALID_COMPARISONS.items(): - if letter_comp in expression or symbol_comp in expression: - return expression.replace(symbol_comp, letter_comp) - - return None def validate_thresholds(thresh_list): """ Checks list of thresholds to ensure all of them have the correct format diff --git a/metplus/util/string_manip.py b/metplus/util/string_manip.py index 61bc8e3e33..2779fa7ebe 100644 --- a/metplus/util/string_manip.py +++ b/metplus/util/string_manip.py @@ -7,6 +7,8 @@ import re from csv import reader +from .constants import VALID_COMPARISONS + def remove_quotes(input_string): """!Remove quotes from string""" @@ -210,3 +212,40 @@ def list_to_str(list_of_values, add_quotes=True): return '"' + '", "'.join(values) + '"' return ', '.join(list_of_values) + + +def comparison_to_letter_format(expression): + """! Convert comparison operator to the letter version if it is not already + + @param expression string starting with comparison operator to convert, + i.e. 
gt3 or <=5.4 + @returns letter comparison operator, i.e. gt3 or le5.4 or None if invalid + """ + for symbol_comp, letter_comp in VALID_COMPARISONS.items(): + if letter_comp in expression or symbol_comp in expression: + return expression.replace(symbol_comp, letter_comp) + + return None + + +def format_thresh(thresh_str): + """! Format thresholds for file naming + + @param thresh_str string of the thresholds. + Can be a comma-separated list, i.e. gt3,<=5.5, ==7 + + @returns string of comma-separated list of the threshold(s) with + letter format, i.e. gt3,le5.5,eq7 + """ + formatted_thresh_list = [] + # separate thresholds by comma and strip off whitespace around values + thresh_list = [thresh.strip() for thresh in thresh_str.split(',')] + for thresh in thresh_list: + if not thresh: + continue + + thresh_letter = comparison_to_letter_format(thresh) + if thresh_letter: + formatted_thresh_list.append(thresh_letter) + + return ','.join(formatted_thresh_list) diff --git a/metplus/util/time_looping.py b/metplus/util/time_looping.py index ce0b036acf..2b4cdb2cd6 100644 --- a/metplus/util/time_looping.py +++ b/metplus/util/time_looping.py @@ -12,7 +12,7 @@ def time_generator(config): Yields the next run time dictionary or None if something went wrong """ # determine INIT or VALID prefix - prefix = _get_time_prefix(config) + prefix = get_time_prefix(config) if not prefix: yield None return @@ -83,7 +83,7 @@ def time_generator(config): current_dt += time_interval def get_start_and_end_times(config): - prefix = _get_time_prefix(config) + prefix = get_time_prefix(config) if not prefix: return None, None @@ -150,7 +150,7 @@ def _create_time_input_dict(prefix, current_dt, clock_dt): 'today': clock_dt.strftime('%Y%m%d'), } -def _get_time_prefix(config): +def get_time_prefix(config): """! Read the METplusConfig object and determine the prefix for the time looping variables. 
diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 2b8074e15b..af633d156f 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -10,8 +10,7 @@ import itertools from dateutil.relativedelta import relativedelta -from ..util import getlist -from ..util import met_util as util +from ..util import getlist, format_thresh from ..util import do_string_sub, find_indices_in_config_section from ..util import parse_var_list, remove_quotes, list_to_str from ..util import get_start_and_end_times @@ -379,7 +378,7 @@ def _format_conf_list(self, conf_list): # do not add quotes and format thresholds if threshold list if 'THRESH' in conf_list: - return [self.format_thresh(item) for item in items] + return [format_thresh(item) for item in items] if conf_list in self.LIST_CATEGORIES: return items @@ -437,29 +436,6 @@ def set_lists_loop_or_group(self, c_dict): return c_dict - @staticmethod - def format_thresh(thresh_str): - """! Format thresholds for file naming - - @param thresh_str string of the thresholds. - Can be a comma-separated list, i.e. gt3,<=5.5, ==7 - - @returns string of comma-separated list of the threshold(s) with - letter format, i.e. gt3,le5.5,eq7 - """ - formatted_thresh_list = [] - # separate thresholds by comma and strip off whitespace around values - thresh_list = [thresh.strip() for thresh in thresh_str.split(',')] - for thresh in thresh_list: - if not thresh: - continue - - thresh_letter = util.comparison_to_letter_format(thresh) - if thresh_letter: - formatted_thresh_list.append(thresh_letter) - - return ','.join(formatted_thresh_list) - def build_stringsub_dict(self, config_dict): """! Build a dictionary with list names, dates, and commonly used identifiers to pass to string_template_substitution. 
@@ -1093,11 +1069,11 @@ def get_c_dict_list(self): } if fcst_thresh: - thresh_formatted = self.format_thresh(fcst_thresh) + thresh_formatted = format_thresh(fcst_thresh) c_dict['FCST_THRESH_LIST'].append(thresh_formatted) if obs_thresh: - thresh_formatted = self.format_thresh(obs_thresh) + thresh_formatted = format_thresh(obs_thresh) c_dict['OBS_THRESH_LIST'].append(thresh_formatted) if fcst_units: From c91e299bb1eebb4b05a68bf55a4fa6148e85fe76 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 17 Oct 2022 16:05:47 -0600 Subject: [PATCH 69/92] cleaned up logic to remove pylint complaints and be consistent --- metplus/wrappers/stat_analysis_wrapper.py | 293 ++++++++++------------ 1 file changed, 139 insertions(+), 154 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index af633d156f..431c32c446 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -13,7 +13,7 @@ from ..util import getlist, format_thresh from ..util import do_string_sub, find_indices_in_config_section from ..util import parse_var_list, remove_quotes, list_to_str -from ..util import get_start_and_end_times +from ..util import get_start_and_end_times, get_time_prefix from ..util import ti_get_seconds_from_relativedelta from ..util import get_met_time_list, get_delta_list from ..util import YMD, YMD_HMS @@ -152,18 +152,18 @@ def create_c_dict(self): c_dict['OUTPUT_DIR'] = self.config.getdir('STAT_ANALYSIS_OUTPUT_DIR', '') + if not c_dict['OUTPUT_DIR']: + self.log_error("Must set STAT_ANALYSIS_OUTPUT_DIR") # read optional template to set -out command line argument c_dict['OUTPUT_TEMPLATE'] = ( self.config.getraw('config', 'STAT_ANALYSIS_OUTPUT_TEMPLATE', '') ) - # set date type, which is typically controlled by LOOP_BY - c_dict['DATE_TYPE'] = self.config.getstr('config', - 'DATE_TYPE', - self.config.getstr('config', - 'LOOP_BY', - '')) + # 
set date type, which is controlled by LOOP_BY + c_dict['DATE_TYPE'] = get_time_prefix(self.config) + if not c_dict['DATE_TYPE']: + self.isOK = False start_dt, end_dt = get_start_and_end_times(self.config) if not start_dt: @@ -198,9 +198,14 @@ def create_c_dict(self): data_type='float', metplus_configs=['STAT_ANALYSIS_HSS_EC_VALUE']) - return self.c_dict_error_check(c_dict, all_field_lists_empty) + return self._c_dict_error_check(c_dict, all_field_lists_empty) def run_all_times(self): + """! Function called when processing all times. + + @returns list of tuples containing all commands that were run and the + environment variables that were set for each + """ self.run_stat_analysis() return self.all_commands @@ -212,6 +217,10 @@ def run_all_times(self): # self.run_stat_analysis() def _read_jobs_from_config(self): + """! Parse the jobs from the METplusConfig object + + @returns list of strings containing each job specifications + """ jobs = [] job_indices = list( find_indices_in_config_section(r'STAT_ANALYSIS_JOB(\d+)$', @@ -234,7 +243,7 @@ def _read_jobs_from_config(self): return jobs - def c_dict_error_check(self, c_dict, all_field_lists_empty): + def _c_dict_error_check(self, c_dict, all_field_lists_empty): """! Check values read into c_dict from METplusConfig and report errors if anything is misconfigured. 
@@ -254,9 +263,6 @@ def c_dict_error_check(self, c_dict, all_field_lists_empty): "any filtering done unless you add the " "arguments to STAT_ANALYSIS_JOBS") - if not c_dict['OUTPUT_DIR']: - self.log_error("Must set STAT_ANALYSIS_OUTPUT_DIR") - if not c_dict['JOBS']: self.log_error( "Must set at least one job with STAT_ANALYSIS_JOB" @@ -275,17 +281,6 @@ def c_dict_error_check(self, c_dict, all_field_lists_empty): self.log_error(f'Must set {conf} if [{check}] is used' ' in a job') - for conf_list in self.LIST_CATEGORIES: - if not c_dict[conf_list]: - self.log_error(f"Must set {conf_list} to run StatAnalysis") - - if not c_dict['DATE_TYPE']: - self.log_error("DATE_TYPE or LOOP_BY must be set to run " - "StatAnalysis wrapper") - - if c_dict['DATE_TYPE'] not in ['VALID', 'INIT']: - self.log_error("DATE_TYPE must be VALID or INIT") - # if var list is set and field lists are not all empty, error if c_dict['VAR_LIST'] and not all_field_lists_empty: self.log_error("Field information defined in both " @@ -478,96 +473,125 @@ def build_stringsub_dict(self, config_dict): .replace(',', '_').replace('*', 'ALL') ) - if 'HOUR' in list_name: - delta_list = get_delta_list(config_dict[list_name]) - if not delta_list: - stringsub_dict[sub_name] = list_name_value - stringsub_dict[sub_name + '_beg'] = relativedelta() - stringsub_dict[sub_name + '_end'] = ( - relativedelta(hours=+23, minutes=+59, seconds=+59) - ) - continue - if len(delta_list) == 1: - stringsub_dict[sub_name] = delta_list[0] - else: - stringsub_dict[sub_name] = ( - '_'.join(get_met_time_list(config_dict[list_name])) - ) + if 'HOUR' not in list_name and 'LEAD' not in list_name: + stringsub_dict[sub_name] = list_name_value - stringsub_dict[sub_name + '_beg'] = delta_list[0] - stringsub_dict[sub_name + '_end'] = delta_list[-1] - - check_list = self._get_check_list(list_name, config_dict) - # if opposite fcst is not set or the same, - # set init/valid hour beg/end to fcst, same for obs - if not check_list or 
config_dict[list_name] == check_list: - # sub name e.g. fcst_valid_hour - # generic list e.g. valid_hour - generic_list = ( - sub_name.replace('fcst_', '').replace('obs_', '') - ) - stringsub_dict[f'{generic_list}_beg'] = ( - stringsub_dict[f'{sub_name}_beg'] + # if list is MODEL, also set obtype + if list_name == 'MODEL': + stringsub_dict['obtype'] = ( + config_dict['OBTYPE'].replace('"', '').replace(' ', '') ) - stringsub_dict[f'{generic_list}_end'] = ( - stringsub_dict[f'{sub_name}_end'] - ) - if (stringsub_dict[f'{generic_list}_beg'] == - stringsub_dict[f'{generic_list}_end']): - stringsub_dict[generic_list] = ( - stringsub_dict[f'{sub_name}_end'] - ) + continue + + if 'HOUR' in list_name: + self._build_stringsub_hours(list_name, config_dict, + stringsub_dict) elif 'LEAD' in list_name: - lead_list = get_met_time_list(config_dict[list_name]) + self._build_stringsub_leads(list_name, config_dict, + stringsub_dict) - if not lead_list: - continue + # Some lines for debugging if needed in future + # for key, value in stringsub_dict.items(): + # self.logger.debug("{} ({})".format(key, value)) + return stringsub_dict - # if multiple leads are specified, format lead info - # using met time notation separated by underscore - if len(lead_list) > 1: - stringsub_dict[sub_name] = '_'.join(lead_list) - continue + def _build_stringsub_hours(self, list_name, config_dict, stringsub_dict): + """! 
Handle logic specific to setting lists named with HOUR + + @param list_name name of list to process + @param config_dict dictionary to read values from + @param stringsub_dict dictionary to set values + """ + sub_name = list_name.lower() + delta_list = get_delta_list(config_dict[list_name]) + if not delta_list: + list_name_value = ( + config_dict[list_name].replace('"', '').replace(' ', '') + .replace(',', '_').replace('*', 'ALL') + ) + stringsub_dict[sub_name] = list_name_value + stringsub_dict[sub_name + '_beg'] = relativedelta() + stringsub_dict[sub_name + '_end'] = ( + relativedelta(hours=+23, minutes=+59, seconds=+59) + ) + return - stringsub_dict[sub_name] = lead_list[0] + if len(delta_list) == 1: + stringsub_dict[sub_name] = delta_list[0] + else: + stringsub_dict[sub_name] = ( + '_'.join(get_met_time_list(config_dict[list_name])) + ) - lead_rd = get_delta_list(config_dict[list_name])[0] - total_sec = ti_get_seconds_from_relativedelta(lead_rd) - stringsub_dict[sub_name+'_totalsec'] = str(total_sec) + stringsub_dict[sub_name + '_beg'] = delta_list[0] + stringsub_dict[sub_name + '_end'] = delta_list[-1] + + check_list = self._get_check_list(list_name, config_dict) + # if opposite fcst is not set or the same, + # set init/valid hour beg/end to fcst, same for obs + if not check_list or config_dict[list_name] == check_list: + # sub name e.g. fcst_valid_hour + # generic list e.g. 
valid_hour + generic_list = ( + sub_name.replace('fcst_', '').replace('obs_', '') + ) + stringsub_dict[f'{generic_list}_beg'] = ( + stringsub_dict[f'{sub_name}_beg'] + ) + stringsub_dict[f'{generic_list}_end'] = ( + stringsub_dict[f'{sub_name}_end'] + ) + if (stringsub_dict[f'{generic_list}_beg'] == + stringsub_dict[f'{generic_list}_end']): + stringsub_dict[generic_list] = ( + stringsub_dict[f'{sub_name}_end'] + ) - stringsub_dict[f'{sub_name}_hour'] = lead_list[0][:-4] - stringsub_dict[f'{sub_name}_min'] = lead_list[0][-4:-2] - stringsub_dict[f'{sub_name}_sec'] = lead_list[0][-2:] + def _build_stringsub_leads(self, list_name, config_dict, stringsub_dict): + """! Handle logic specific to setting lists named with LEAD - check_list = self._get_check_list(list_name, config_dict) - if not check_list or config_dict[list_name] == check_list: - stringsub_dict['lead'] = stringsub_dict[sub_name] - stringsub_dict['lead_hour'] = ( - stringsub_dict[sub_name+'_hour'] - ) - stringsub_dict['lead_min'] = ( - stringsub_dict[sub_name+'_min'] - ) - stringsub_dict['lead_sec'] = ( - stringsub_dict[sub_name+'_sec'] - ) - stringsub_dict['lead_totalsec'] = ( - stringsub_dict[sub_name+'_totalsec'] - ) - else: - stringsub_dict[sub_name] = list_name_value + @param list_name name of list to process + @param config_dict dictionary to read values from + @param stringsub_dict dictionary to set values + """ + sub_name = list_name.lower() + lead_list = get_met_time_list(config_dict[list_name]) - # if list is MODEL, also set obtype - if list_name == 'MODEL': - stringsub_dict['obtype'] = ( - config_dict['OBTYPE'].replace('"', '').replace(' ', '') - ) + if not lead_list: + return - # Some lines for debugging if needed in future - # for key, value in stringsub_dict.items(): - # self.logger.debug("{} ({})".format(key, value)) - return stringsub_dict + # if multiple leads are specified, format lead info + # using met time notation separated by underscore + if len(lead_list) > 1: + 
stringsub_dict[sub_name] = '_'.join(lead_list) + return + + stringsub_dict[sub_name] = lead_list[0] + + lead_rd = get_delta_list(config_dict[list_name])[0] + total_sec = ti_get_seconds_from_relativedelta(lead_rd) + stringsub_dict[sub_name + '_totalsec'] = str(total_sec) + + stringsub_dict[f'{sub_name}_hour'] = lead_list[0][:-4] + stringsub_dict[f'{sub_name}_min'] = lead_list[0][-4:-2] + stringsub_dict[f'{sub_name}_sec'] = lead_list[0][-2:] + + check_list = self._get_check_list(list_name, config_dict) + if not check_list or config_dict[list_name] == check_list: + stringsub_dict['lead'] = stringsub_dict[sub_name] + stringsub_dict['lead_hour'] = ( + stringsub_dict[sub_name + '_hour'] + ) + stringsub_dict['lead_min'] = ( + stringsub_dict[sub_name + '_min'] + ) + stringsub_dict['lead_sec'] = ( + stringsub_dict[sub_name + '_sec'] + ) + stringsub_dict['lead_totalsec'] = ( + stringsub_dict[sub_name + '_totalsec'] + ) @staticmethod def _get_check_list(list_name, config_dict): @@ -1036,14 +1060,13 @@ def get_c_dict_list(self): f"VAR{var_info['index']}_FOURIER_DECOMP", False) ) + fourier_wave_num_pairs = [''] if run_fourier: fourier_wave_num_pairs = getlist( self.config.getstr('config', f"VAR{var_info['index']}_WAVE_NUM_LIST", '') ) - else: - fourier_wave_num_pairs = [''] # if no thresholds were specified, use a list # containing an empty string to loop one iteration @@ -1252,7 +1275,6 @@ def get_job_info(self, model_info, runtime_settings_dict): # substitute filename templates that may be found in rest of job job = do_string_sub(job, **stringsub_dict) - jobs.append(job) return jobs @@ -1284,62 +1306,25 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): if not self.create_output_directories(runtime_settings): continue - # Set environment variables and run stat_analysis. 
+ # Set legacy environment variables for name, value in runtime_settings.items(): self.add_env_var(name, value) # set METPLUS_ env vars for MET config file to be consistent # with other wrappers - mp_lists = ['MODEL', - 'DESC', - 'FCST_LEAD', - 'OBS_LEAD', - 'FCST_VALID_HOUR', - 'OBS_VALID_HOUR', - 'FCST_INIT_HOUR', - 'OBS_INIT_HOUR', - 'FCST_VAR', - 'OBS_VAR', - 'FCST_UNITS', - 'OBS_UNITS', - 'FCST_LEVEL', - 'OBS_LEVEL', - 'OBTYPE', - 'VX_MASK', - 'INTERP_MTHD', - 'INTERP_PNTS', - 'FCST_THRESH', - 'OBS_THRESH', - 'COV_THRESH', - 'ALPHA', - 'LINE_TYPE' - ] - for mp_list in mp_lists: - if not runtime_settings.get(mp_list, ''): - continue - value = (f"{mp_list.lower()} = " - f"[{runtime_settings.get(mp_list, '')}];") - self.env_var_dict[f'METPLUS_{mp_list}'] = value - - mp_items = ['FCST_VALID_BEG', - 'FCST_VALID_END', - 'OBS_VALID_BEG', - 'OBS_VALID_END', - 'FCST_INIT_BEG', - 'FCST_INIT_END', - 'OBS_INIT_BEG', - 'OBS_INIT_END', - ] - for mp_item in mp_items: - if not runtime_settings.get(mp_item, ''): + for key in self.WRAPPER_ENV_VAR_KEYS: + item = key.replace('METPLUS_', '') + if not runtime_settings.get(item, ''): continue - value = remove_quotes(runtime_settings.get(mp_item, '')) - value = (f"{mp_item.lower()} = \"{value}\";") - self.env_var_dict[f'METPLUS_{mp_item}'] = value - - value = '","'.join(runtime_settings['JOBS']) - value = f'jobs = ["{value}"];' - self.env_var_dict['METPLUS_JOBS'] = value + value = runtime_settings.get(item, '') + if key.endswith('_JOBS'): + value = '["' + '","'.join(value) + '"]' + elif key.endswith('_BEG') or key.endswith('_END'): + value = f'"{value}"' + else: + value = f'[{value}]' + value = f'{item.lower()} = {value};' + self.env_var_dict[key] = value # send environment variables to logger self.set_environment_variables() From 665cf09e9aa8b40497c3ffc5ff292dbbfde641d0 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Mon, 17 Oct 2022 16:06:35 -0600 Subject: [PATCH 70/92] turn off 
all use case tests --- .github/parm/use_case_groups.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/parm/use_case_groups.json b/.github/parm/use_case_groups.json index 6553fec63a..f4dcd06a1e 100644 --- a/.github/parm/use_case_groups.json +++ b/.github/parm/use_case_groups.json @@ -7,7 +7,7 @@ { "category": "met_tool_wrapper", "index_list": "30-58", - "run": true + "run": false }, { "category": "air_quality_and_comp", @@ -52,7 +52,7 @@ { "category": "data_assimilation", "index_list": "0", - "run": true + "run": false }, { "category": "marine_and_cryosphere", @@ -82,7 +82,7 @@ { "category": "medium_range", "index_list": "3-5", - "run": true + "run": false }, { "category": "medium_range", @@ -167,7 +167,7 @@ { "category": "s2s_mid_lat", "index_list": "0-2", - "run": true + "run": false }, { "category": "s2s_mid_lat", From b563d864a7d2acd06ccdf755271e57f87f00b611 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Wed, 19 Oct 2022 16:42:23 -0600 Subject: [PATCH 71/92] check that changing logic to not set beg or end based on hour list does not break existing use cases, ci-run-all-diff --- metplus/wrappers/stat_analysis_wrapper.py | 37 ++++++++++++++++------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 431c32c446..fa287702f5 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -863,14 +863,14 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, # if hour list is not set if not hour_list or (len(hour_list) == 1 and hour_list == ['']): - if date_type == init_or_valid: - config_dict[f'{prefix}_BEG'] = date_beg.strftime(YMD_HMS) - - # if end time is only YYYYMMDD, set HHHMMSS to 23:59:59 - if date_end == datetime.strptime(end_ymd, YMD): - config_dict[f'{prefix}_END'] = f'{end_ymd}_235959' - else: - 
config_dict[f'{prefix}_END'] = date_end.strftime(YMD_HMS) + # if date_type == init_or_valid: + # config_dict[f'{prefix}_BEG'] = date_beg.strftime(YMD_HMS) + # + # # if end time is only YYYYMMDD, set HHHMMSS to 23:59:59 + # if date_end == datetime.strptime(end_ymd, YMD): + # config_dict[f'{prefix}_END'] = f'{end_ymd}_235959' + # else: + # config_dict[f'{prefix}_END'] = date_end.strftime(YMD_HMS) return # if multiple hours are specified @@ -886,9 +886,24 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, # if 1 hour specified hour_now = hour_list[0].replace('"', '') config_dict[f'{prefix}_HOUR'] = '"'+hour_now+'"' - if date_type == init_or_valid: - config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_now}' - config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_now}' + # if date_type == init_or_valid: + # config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_now}' + # config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_now}' + + # check if explicit value is set for _BEG or _END + # e.g. STAT_ANALYSIS_FCST_INIT_BEG + app = self.app_name.upper() + for beg_or_end in ('BEG', 'END'): + var_prefix = f'{app}_{prefix}_{beg_or_end}' + generic_prefix = f'{app}_{init_or_valid}_{beg_or_end}' + value = None + if self.config.has_option('config', var_prefix): + value = self.config.getraw('config', var_prefix) + elif self.config.has_option('config', generic_prefix): + value = self.config.getraw('config', generic_prefix) + + if value: + config_dict[f'{prefix}_{beg_or_end}'] = value def parse_model_info(self): """! Parse for model information. 
From e871d501dafa22e400ab3852cd2dbd057060c903 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 11:18:46 -0600 Subject: [PATCH 72/92] change logic to never set fcst/obs_init/valid__beg/end values based on hour lists and only set them if explicitly requested in the METplus config --- metplus/wrappers/stat_analysis_wrapper.py | 49 +++++------------------ 1 file changed, 9 insertions(+), 40 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index fa287702f5..1ea00e95b1 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -834,6 +834,7 @@ def format_valid_init(self, config_dict): @returns dictionary containing the edited configuration information for valid and initialization dates and hours """ + # set all of the HOUR and LEAD lists to include the MET time format for list_name in self.FORMAT_LISTS: list_name = list_name.replace('_LIST', '') values = get_met_time_list(config_dict.get(list_name, '')) @@ -844,51 +845,19 @@ def format_valid_init(self, config_dict): for init_or_valid in ['INIT', 'VALID']: self._format_valid_init_item(config_dict, fcst_or_obs, - init_or_valid, - self.c_dict['DATE_TYPE']) + init_or_valid) return config_dict - def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid, - date_type): - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] - - # get YYYYMMDD of begin and end time - beg_ymd = date_beg.strftime(YMD) - end_ymd = date_end.strftime(YMD) + def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid): + """! Check if variables are set in the METplusConfig to explicitly + set the begin and end values in the wrapped MET config file. 
+ @param config_dict dictionary to set values to set in MET config + @param fcst_or_obs string either FCST or OBS + @param init_or_valid string either INIT or VALID + """ prefix = f'{fcst_or_obs}_{init_or_valid}' - hour_list = config_dict[f'{prefix}_HOUR'].split(', ') - - # if hour list is not set - if not hour_list or (len(hour_list) == 1 and hour_list == ['']): - # if date_type == init_or_valid: - # config_dict[f'{prefix}_BEG'] = date_beg.strftime(YMD_HMS) - # - # # if end time is only YYYYMMDD, set HHHMMSS to 23:59:59 - # if date_end == datetime.strptime(end_ymd, YMD): - # config_dict[f'{prefix}_END'] = f'{end_ymd}_235959' - # else: - # config_dict[f'{prefix}_END'] = date_end.strftime(YMD_HMS) - return - - # if multiple hours are specified - if len(hour_list) > 1: - if date_type == init_or_valid: - hour_beg = hour_list[0].replace('"', '') - hour_end = hour_list[-1].replace('"', '') - config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_beg}' - config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_end}' - - return - - # if 1 hour specified - hour_now = hour_list[0].replace('"', '') - config_dict[f'{prefix}_HOUR'] = '"'+hour_now+'"' - # if date_type == init_or_valid: - # config_dict[f'{prefix}_BEG'] = f'{beg_ymd}_{hour_now}' - # config_dict[f'{prefix}_END'] = f'{end_ymd}_{hour_now}' # check if explicit value is set for _BEG or _END # e.g. 
STAT_ANALYSIS_FCST_INIT_BEG From 653b04cde7da8158fc055ab2d38acac63dc0b85e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 11:25:40 -0600 Subject: [PATCH 73/92] pass runtime settings to set_environment_variables function so that string template substitution on any env var that will be set by wrapper will be performed before calling stat_analysis --- metplus/wrappers/stat_analysis_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 1ea00e95b1..dc85a8edc1 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1311,7 +1311,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): self.env_var_dict[key] = value # send environment variables to logger - self.set_environment_variables() + self.set_environment_variables(runtime_settings) # set lookin dir to add to command self.logger.debug("Setting -lookin dir to " From 5f25e89500bdec28f63f3b3c9ed8fa905d68d6bc Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 12:49:44 -0600 Subject: [PATCH 74/92] added logic to substitute string template tags when reading explicit fcst/obs_init/valid_beg/end values from config. prevent crash when dict values are not set by using get --- metplus/wrappers/stat_analysis_wrapper.py | 45 ++++++++++++----------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index dc85a8edc1..e3f34a758d 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -435,10 +435,6 @@ def build_stringsub_dict(self, config_dict): """! Build a dictionary with list names, dates, and commonly used identifiers to pass to string_template_substitution. 
- @param lists_to_loop list of all the list names whose items - are being grouped together - @param lists_to_group list of all the list names whose items - are being looped over @param config_dict dictionary containing the configuration information @returns dictionary with the formatted info to pass to do_string_sub """ @@ -456,22 +452,19 @@ def build_stringsub_dict(self, config_dict): # Set string sub info from fcst/obs hour lists self._set_stringsub_hours(stringsub_dict, - config_dict[f'FCST_{date_type}_HOUR'], - config_dict[f'OBS_{date_type}_HOUR']) + config_dict.get(f'FCST_{date_type}_HOUR'), + config_dict.get(f'OBS_{date_type}_HOUR')) # handle opposite of date_type VALID if INIT and vice versa self._set_strinsub_other(stringsub_dict, date_type.lower(), - config_dict['FCST_LEAD'], - config_dict['OBS_LEAD']) + config_dict.get('FCST_LEAD'), + config_dict.get('OBS_LEAD')) # Set loop information for loop_or_group_list in self.EXPECTED_CONFIG_LISTS: list_name = loop_or_group_list.replace('_LIST', '') sub_name = list_name.lower() - list_name_value = ( - config_dict[list_name].replace('"', '').replace(' ', '') - .replace(',', '_').replace('*', 'ALL') - ) + list_name_value = self._get_list_name_value(list_name, config_dict) if 'HOUR' not in list_name and 'LEAD' not in list_name: stringsub_dict[sub_name] = list_name_value @@ -479,7 +472,8 @@ def build_stringsub_dict(self, config_dict): # if list is MODEL, also set obtype if list_name == 'MODEL': stringsub_dict['obtype'] = ( - config_dict['OBTYPE'].replace('"', '').replace(' ', '') + config_dict.get('OBTYPE', '').replace('"', '') + .replace(' ', '') ) continue @@ -506,10 +500,7 @@ def _build_stringsub_hours(self, list_name, config_dict, stringsub_dict): sub_name = list_name.lower() delta_list = get_delta_list(config_dict[list_name]) if not delta_list: - list_name_value = ( - config_dict[list_name].replace('"', '').replace(' ', '') - .replace(',', '_').replace('*', 'ALL') - ) + list_name_value = 
self._get_list_name_value(list_name, config_dict) stringsub_dict[sub_name] = list_name_value stringsub_dict[sub_name + '_beg'] = relativedelta() stringsub_dict[sub_name + '_end'] = ( @@ -548,6 +539,13 @@ def _build_stringsub_hours(self, list_name, config_dict, stringsub_dict): stringsub_dict[f'{sub_name}_end'] ) + @staticmethod + def _get_list_name_value(list_name, config_dict): + value = config_dict.get(list_name, '') + value = value.replace('"', '').replace(' ', '').replace(',', '_') + value = value.replace('*', 'ALL') + return value + def _build_stringsub_leads(self, list_name, config_dict, stringsub_dict): """! Handle logic specific to setting lists named with LEAD @@ -556,7 +554,7 @@ def _build_stringsub_leads(self, list_name, config_dict, stringsub_dict): @param stringsub_dict dictionary to set values """ sub_name = list_name.lower() - lead_list = get_met_time_list(config_dict[list_name]) + lead_list = get_met_time_list(config_dict.get(list_name)) if not lead_list: return @@ -834,6 +832,7 @@ def format_valid_init(self, config_dict): @returns dictionary containing the edited configuration information for valid and initialization dates and hours """ + stringsub_dict = self.build_stringsub_dict(config_dict) # set all of the HOUR and LEAD lists to include the MET time format for list_name in self.FORMAT_LISTS: list_name = list_name.replace('_LIST', '') @@ -844,12 +843,13 @@ def format_valid_init(self, config_dict): for fcst_or_obs in ['FCST', 'OBS']: for init_or_valid in ['INIT', 'VALID']: self._format_valid_init_item(config_dict, + stringsub_dict, fcst_or_obs, init_or_valid) return config_dict - def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid): + def _format_valid_init_item(self, config_dict, stringsub_dict, fcst_or_obs, init_or_valid): """! Check if variables are set in the METplusConfig to explicitly set the begin and end values in the wrapped MET config file. 
@@ -872,7 +872,8 @@ def _format_valid_init_item(self, config_dict, fcst_or_obs, init_or_valid): value = self.config.getraw('config', generic_prefix) if value: - config_dict[f'{prefix}_{beg_or_end}'] = value + formatted_value = do_string_sub(value, **stringsub_dict) + config_dict[f'{prefix}_{beg_or_end}'] = formatted_value def parse_model_info(self): """! Parse for model information. @@ -1202,6 +1203,8 @@ def get_model_obtype_and_lookindir(self, runtime_settings_dict): for model_info in self.c_dict['MODEL_INFO_LIST']: # skip model if not in list of models to process if model_info['name'] not in models_to_run: + self.logger.debug(f"Model {model_info['name']} not found in " + "list of models to run. Skipping.") continue model_list.append(model_info['name']) @@ -1311,7 +1314,7 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): self.env_var_dict[key] = value # send environment variables to logger - self.set_environment_variables(runtime_settings) + self.set_environment_variables() # set lookin dir to add to command self.logger.debug("Setting -lookin dir to " From 2462db3ea79891c8f6237d18a983c681f377cb58 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 12:51:29 -0600 Subject: [PATCH 75/92] started adding tests to ensure fcst/obs_init/valid_beg/end values are read from METplus config and set in env vars for wrapped MET config file properly --- .../stat_analysis/test_stat_analysis.py | 51 ++++++++++++++++++- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 1a13060013..9ea1e21ba0 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -77,16 +77,63 @@ def set_minimum_config_settings(config): '{valid?fmt=%Y%m%d%H}') config.set('config', 
'GROUP_LIST_ITEMS', 'DESC_LIST') config.set('config', 'LOOP_LIST_ITEMS', 'MODEL_LIST') - config.set('config', 'MODEL_LIST', 'MODEL1') + config.set('config', 'MODEL_LIST', 'MODEL_A') config.set('config', 'STAT_ANALYSIS_JOB1', '-job filter') config.set('config', 'MODEL1', 'MODEL_A') - config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', '/some/lookin/dir') + config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', + '{METPLUS_BASE}/internal/tests/data/stat_data') # not required, can be unset for certain tests config.set('config', 'STAT_ANALYSIS_CONFIG_FILE', '{PARM_BASE}/met_config/STATAnalysisConfig_wrapped') +@pytest.mark.parametrize( + 'config_overrides, expected_env_vars', [ + ({}, {}), + ({'STAT_ANALYSIS_FCST_VALID_BEG': '{fcst_valid_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_VALID_BEG': 'fcst_valid_beg = "20221014_000000";'}), + ({'STAT_ANALYSIS_FCST_VALID_END': '{fcst_valid_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221015_235959";'}), + ({'FCST_VALID_HOUR_LIST': '12'}, + {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000"];'}), + ({'FCST_VALID_HOUR_LIST': '12,108'}, + {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000", "1080000"];'}), + ] +) +@pytest.mark.wrapper_c +def test_valid_init_env_vars(metplus_config, config_overrides, + expected_env_vars): + config = metplus_config() + set_minimum_config_settings(config) + config.set('config', 'INIT_END', '20221015') + for key, value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + assert wrapper.isOK + + runtime_settings_dict_list = wrapper.get_all_runtime_settings() + assert runtime_settings_dict_list + first_runtime_only = [runtime_settings_dict_list[0]] + wrapper.run_stat_analysis_job(first_runtime_only) + print('FIRST RUNTIME SETTINGS:') + pp.pprint(first_runtime_only) + all_cmds = wrapper.all_commands + print(f"ALL COMMANDS: {all_cmds}") + _, actual_env_vars = all_cmds[0] + + env_var_keys = [item for item 
in wrapper.WRAPPER_ENV_VAR_KEYS + if 'BEG' in item or 'END' in item] + for env_var_key in env_var_keys: + match = next((item for item in actual_env_vars if + item.startswith(env_var_key)), None) + assert match is not None + actual_value = match.split('=', 1)[1] + print(f"ENV VAR: {env_var_key}") + assert expected_env_vars.get(env_var_key, '') == actual_value + + @pytest.mark.parametrize( 'config_overrides, expected_result', [ ({}, True), From 6c9a627e1bcc3307d5786f8947a7615ee8f35be0 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:16:08 -0600 Subject: [PATCH 76/92] added more tests for setting beg/end values in wrapped MET config --- .../stat_analysis/test_stat_analysis.py | 51 +++++++++++++++++-- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 9ea1e21ba0..fa13f7954a 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -24,8 +24,7 @@ def stat_analysis_wrapper(metplus_config): # Default, empty StatAnalysisWrapper with some configuration values set # to /path/to: - extra_configs = [] - extra_configs.append(TEST_CONF) + extra_configs = [TEST_CONF] config = metplus_config(extra_configs) handle_tmp_dir(config) return StatAnalysisWrapper(config) @@ -90,18 +89,60 @@ def set_minimum_config_settings(config): @pytest.mark.parametrize( 'config_overrides, expected_env_vars', [ + # 0 ({}, {}), + # 1 - fcst valid beg ({'STAT_ANALYSIS_FCST_VALID_BEG': '{fcst_valid_beg?fmt=%Y%m%d_%H%M%S}'}, {'METPLUS_FCST_VALID_BEG': 'fcst_valid_beg = "20221014_000000";'}), + # 2 - fcst valid end ({'STAT_ANALYSIS_FCST_VALID_END': '{fcst_valid_end?fmt=%Y%m%d_%H%M%S}'}, {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221015_235959";'}), + # 3 - fcst valid end with shift + 
({'STAT_ANALYSIS_FCST_VALID_END': '{fcst_valid_end?fmt=%Y%m%d?shift=1d}_000000'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221016_000000";'}), + # 4 - obs valid beg + ({'STAT_ANALYSIS_OBS_VALID_BEG': '{obs_valid_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_VALID_BEG': 'obs_valid_beg = "20221014_000000";'}), + # 5 - obs valid end + ({'STAT_ANALYSIS_OBS_VALID_END': '{obs_valid_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_VALID_END': 'obs_valid_end = "20221015_235959";'}), + # 6 fcst init beg + ({'STAT_ANALYSIS_FCST_INIT_BEG': '{fcst_init_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_INIT_BEG': 'fcst_init_beg = "20221014_000000";'}), + # 7 - fcst init end + ({'STAT_ANALYSIS_FCST_INIT_END': '{fcst_init_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_INIT_END': 'fcst_init_end = "20221015_235959";'}), + # 8 - fcst valid hour single ({'FCST_VALID_HOUR_LIST': '12'}, {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000"];'}), + # 9 - fcst valid hour multiple ({'FCST_VALID_HOUR_LIST': '12,108'}, {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000", "1080000"];'}), + # 10 - obs init beg + ({'STAT_ANALYSIS_OBS_INIT_BEG': '{obs_init_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_INIT_BEG': 'obs_init_beg = "20221014_000000";'}), + # 11 - obs init end + ({'STAT_ANALYSIS_OBS_INIT_END': '{obs_init_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_INIT_END': 'obs_init_end = "20221015_235959";'}), + # 12 - generic valid beg + ({'STAT_ANALYSIS_VALID_BEG': '{fcst_valid_beg?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_VALID_BEG': 'fcst_valid_beg = "20221014_12";', + 'METPLUS_OBS_VALID_BEG': 'obs_valid_beg = "20221014_12";'}), + # 13 - generic valid end + ({'STAT_ANALYSIS_VALID_END': '{fcst_valid_end?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221015_12";', + 'METPLUS_OBS_VALID_END': 'obs_valid_end = "20221015_12";'}), + # 14 - generic init beg + ({'STAT_ANALYSIS_INIT_BEG': '{fcst_init_beg?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_INIT_BEG': 'fcst_init_beg = "20221014_12";', + 
'METPLUS_OBS_INIT_BEG': 'obs_init_beg = "20221014_12";'}), + # 15 - generic init end + ({'STAT_ANALYSIS_INIT_END': '{fcst_init_end?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_INIT_END': 'fcst_init_end = "20221015_12";', + 'METPLUS_OBS_INIT_END': 'obs_init_end = "20221015_12";'}), ] ) -@pytest.mark.wrapper_c +@pytest.mark.wrapper_d def test_valid_init_env_vars(metplus_config, config_overrides, expected_env_vars): config = metplus_config() @@ -115,11 +156,11 @@ def test_valid_init_env_vars(metplus_config, config_overrides, runtime_settings_dict_list = wrapper.get_all_runtime_settings() assert runtime_settings_dict_list + first_runtime_only = [runtime_settings_dict_list[0]] wrapper.run_stat_analysis_job(first_runtime_only) - print('FIRST RUNTIME SETTINGS:') - pp.pprint(first_runtime_only) all_cmds = wrapper.all_commands + print(f"ALL COMMANDS: {all_cmds}") _, actual_env_vars = all_cmds[0] From 437e6d60a3f1cad5d6499d575002b45894484f19 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:18:51 -0600 Subject: [PATCH 77/92] removed setting of legacy environment variables that were set to support very old wrapped MET config files for StatAnalysis --- metplus/wrappers/stat_analysis_wrapper.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index e3f34a758d..8aa5396659 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1293,10 +1293,6 @@ def run_stat_analysis_job(self, runtime_settings_dict_list): if not self.create_output_directories(runtime_settings): continue - # Set legacy environment variables - for name, value in runtime_settings.items(): - self.add_env_var(name, value) - # set METPLUS_ env vars for MET config file to be consistent # with other wrappers for key in self.WRAPPER_ENV_VAR_KEYS: From a7ef6ef24857d6e77622d1ac2e26a98bba8f7808 Mon Sep 17 00:00:00 2001 From: 
George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:38:13 -0600 Subject: [PATCH 78/92] updating docs with info about new settings --- docs/Users_Guide/glossary.rst | 13 +++++++++++++ docs/Users_Guide/wrappers.rst | 30 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index df39a3c89c..a49d3d75ce 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -9956,3 +9956,16 @@ METplus Configuration Glossary Specify the value for 'nc_orank_flag.weight' in the MET configuration file for EnsembleStat. | *Used by:* EnsembleStat + + STAT_ANALYSIS_FCST_INIT_BEG + Specify the value for 'fcst_init_beg' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example: + + [config] + INIT_BEG = 20221014 + STAT_ANALYSIS_FCST_INIT_BEG = {fcst_init_beg?fmt=%Y%m%d_%H} + + will set fcst_init_beg = "20221014_00"; in the wrapped MET config file. + + | *Used by:* StatAnalysis diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 5f5c65b0a4..e32b452341 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -6830,6 +6830,28 @@ Then add the name of the list (without the numbers) to LOOP_LIST_ITEMS:: If FCST_LEAD_LIST was added to GROUP_LIST_ITEMS instead, then all 6 items defined in the 2 lists will be combined and passed to the tool at once. +Filtering Begin and End Times +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting in v5.0.0, the [fcst/obs]_[init/valid]_[beg/end] in the wrapped +MET config file can be set using the corresponding METplus config variables. +The values can include the filename template tags that are supported in the +wrapper (see :ref:`stat-analysis-filename-template`). 
For example,
+to set the fcst_valid_beg value::
+
+    [config]
+    VALID_BEG = 20221014
+    STAT_ANALYSIS_FCST_VALID_BEG = {fcst_valid_beg?fmt=%Y%m%d_%H%M%S}
+
+This will set fcst_valid_beg = "20221014_000000"; in the MET config file.
+
+Prior to v5.0.0, setting hour values in [FCST/OBS]_[INIT/VALID]_HOUR_LIST
+would result in the corresponding _beg and _end values in the wrapped MET
+config file being set based on the hours and the [INIT/VALID]_[BEG/END] values.
+
+
+.. _stat-analysis-filename-template:
+
 Additional Filename Template Tags
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -6975,6 +6997,14 @@ The following values are optional in the METplus configuration file:
 | :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE`
 | :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE`
 | :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE`
+| :term:`STAT_ANALYSIS_FCST_INIT_BEG`
+| :term:`STAT_ANALYSIS_FCST_INIT_END`
+| :term:`STAT_ANALYSIS_FCST_VALID_BEG`
+| :term:`STAT_ANALYSIS_FCST_VALID_END`
+| :term:`STAT_ANALYSIS_OBS_INIT_BEG`
+| :term:`STAT_ANALYSIS_OBS_INIT_END`
+| :term:`STAT_ANALYSIS_OBS_VALID_BEG`
+| :term:`STAT_ANALYSIS_OBS_VALID_END`
 | :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES`
 
..
warning:: **DEPRECATED:** From 9d74886731b37f421f009a8ca2c5b2e13b36af4e Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:48:23 -0600 Subject: [PATCH 79/92] added rest of new config variables to glossary and fixed formatting that was removing first letter of sentence for CUSTOM_LOOP_LIST --- docs/Users_Guide/glossary.rst | 131 ++++++++++++++++++++++++++++++++-- 1 file changed, 125 insertions(+), 6 deletions(-) diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index a49d3d75ce..9de1696a47 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -20,9 +20,9 @@ METplus Configuration Glossary [dir] SERIES_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/{custom?fmt=%s} - With this configuration, SeriesAnalysis will be called twice. The first run will use SeriesAnalysisConfig_one and write output to {OUTPUT_BASE}/one. The second run will use SeriesAnalysisConfig_two and write output to {OUTPUT_BASE}/two. + With this configuration, SeriesAnalysis will be called twice. The first run will use SeriesAnalysisConfig_one and write output to {OUTPUT_BASE}/one. The second run will use SeriesAnalysisConfig_two and write output to {OUTPUT_BASE}/two. - If unset or left blank, the wrapper will run once per run time. There are also wrapper-specific configuration variables to define a custom string loop list for a single wrapper, i.e. :term:`SERIES_ANALYSIS_CUSTOM_LOOP_LIST` and :term:`PCP_COMBINE_CUSTOM_LOOP_LIST`. + If unset or left blank, the wrapper will run once per run time. There are also wrapper-specific configuration variables to define a custom string loop list for a single wrapper, i.e. :term:`SERIES_ANALYSIS_CUSTOM_LOOP_LIST` and :term:`PCP_COMBINE_CUSTOM_LOOP_LIST`. | *Used by:* Many @@ -9960,12 +9960,131 @@ METplus Configuration Glossary STAT_ANALYSIS_FCST_INIT_BEG Specify the value for 'fcst_init_beg' in the MET configuration file for StatAnalysis. 
This can refer to filename template tags that are set by
-    the wrapper. Example:
+    the wrapper. Example::
 
-    [config]
-    INIT_BEG = 20221014
-    STAT_ANALYSIS_FCST_INIT_BEG = {fcst_init_beg?fmt=%Y%m%d_%H}
+      [config]
+      INIT_BEG = 20221014
+      STAT_ANALYSIS_FCST_INIT_BEG = {fcst_init_beg?fmt=%Y%m%d_%H}
 
    will set fcst_init_beg = "20221014_00"; in the wrapped MET config file.
 
    | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_FCST_INIT_END
+      Specify the value for 'fcst_init_end' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      INIT_BEG = 20221014
+      INIT_END = 20221015
+      STAT_ANALYSIS_FCST_INIT_END = {fcst_init_beg?fmt=%Y%m%d}_12
+
+      will set fcst_init_end = "20221014_12"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_OBS_INIT_BEG
+      Specify the value for 'obs_init_beg' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      INIT_BEG = 20221014
+      STAT_ANALYSIS_OBS_INIT_BEG = {obs_init_beg?fmt=%Y%m%d_%H}
+
+      will set obs_init_beg = "20221014_00"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_OBS_INIT_END
+      Specify the value for 'obs_init_end' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      INIT_END = 20221015
+      STAT_ANALYSIS_OBS_INIT_END = {obs_init_end?fmt=%Y%m%d}_12
+
+      will set obs_init_end = "20221015_12"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_FCST_VALID_BEG
+      Specify the value for 'fcst_valid_beg' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      VALID_BEG = 20221014
+      STAT_ANALYSIS_FCST_VALID_BEG = {fcst_valid_beg?fmt=%Y%m%d_%H}
+
+      will set fcst_valid_beg = "20221014_00"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_FCST_VALID_END
+      Specify the value for 'fcst_valid_end' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      VALID_BEG = 20221014
+      VALID_END = 20221015
+      STAT_ANALYSIS_FCST_VALID_END = {fcst_valid_beg?fmt=%Y%m%d}_12
+
+      will set fcst_valid_end = "20221014_12"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_OBS_VALID_BEG
+      Specify the value for 'obs_valid_beg' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      VALID_BEG = 20221014
+      STAT_ANALYSIS_OBS_VALID_BEG = {obs_valid_beg?fmt=%Y%m%d_%H}
+
+      will set obs_valid_beg = "20221014_00"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_OBS_VALID_END
+      Specify the value for 'obs_valid_end' in the MET configuration file for
+      StatAnalysis. This can refer to filename template tags that are set by
+      the wrapper. Example::
+
+      [config]
+      VALID_END = 20221015
+      STAT_ANALYSIS_OBS_VALID_END = {obs_valid_end?fmt=%Y%m%d}_12
+
+      will set obs_valid_end = "20221015_12"; in the wrapped MET config file.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_INIT_BEG
+      Specify the value for both 'fcst_init_beg' and 'obs_init_beg' in the MET
+      configuration file for StatAnalysis.
+      See :term:`STAT_ANALYSIS_FCST_INIT_BEG`.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_INIT_END
+      Specify the value for both 'fcst_init_end' and 'obs_init_end' in the MET
+      configuration file for StatAnalysis.
+      See :term:`STAT_ANALYSIS_FCST_INIT_END`.
+
+      | *Used by:* StatAnalysis
+
+   STAT_ANALYSIS_VALID_BEG
+      Specify the value for both 'fcst_valid_beg' and 'obs_valid_beg' in the MET
+      configuration file for StatAnalysis.
+      See :term:`STAT_ANALYSIS_FCST_VALID_BEG`.
+ + | *Used by:* StatAnalysis + + STAT_ANALYSIS_VALID_END + Specify the value for both 'fcst_valid_end' and 'obs_valid_end' in the MET + configuration file for StatAnalysis. + See :term:`STAT_ANALYSIS_FCST_VALID_END`. + + | *Used by:* StatAnalysis From 339e31ab4688f13f072d2cd20a21537f1416aed0 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 09:43:34 -0600 Subject: [PATCH 80/92] remove env vars that are no longer needed --- internal/tests/pytests/minimum_pytest.corrinado.sh | 13 ------------- internal/tests/pytests/minimum_pytest.dakota.sh | 9 --------- internal/tests/pytests/minimum_pytest.eyewall.sh | 9 --------- internal/tests/pytests/minimum_pytest.hera.sh | 9 --------- internal/tests/pytests/minimum_pytest.kiowa.sh | 9 --------- internal/tests/pytests/minimum_pytest.venus.sh | 9 --------- 6 files changed, 58 deletions(-) delete mode 100644 internal/tests/pytests/minimum_pytest.corrinado.sh diff --git a/internal/tests/pytests/minimum_pytest.corrinado.sh b/internal/tests/pytests/minimum_pytest.corrinado.sh deleted file mode 100644 index 0555a9f7e1..0000000000 --- a/internal/tests/pytests/minimum_pytest.corrinado.sh +++ /dev/null @@ -1,13 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=${HOME}/data/METplus_Data -export METPLUS_TEST_OUTPUT_BASE=${HOME}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=${HOME}/met/9.0-beta3 -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp - -export METPLUS_TEST_EXE_WGRIB2=wgrib2 -export METPLUS_TEST_EXE_CUT=cut -export METPLUS_TEST_EXE_TR=tr -export METPLUS_TEST_EXE_RM=rm -export METPLUS_TEST_EXE_NCAP2=ncap2 -export METPLUS_TEST_EXE_CONVERT=convert -export METPLUS_TEST_EXE_NCDUMP=ncdump -export METPLUS_TEST_EXE_EGREP=egrep diff --git a/internal/tests/pytests/minimum_pytest.dakota.sh b/internal/tests/pytests/minimum_pytest.dakota.sh index e3c93beef7..0b66555fa9 100644 --- a/internal/tests/pytests/minimum_pytest.dakota.sh +++ 
b/internal/tests/pytests/minimum_pytest.dakota.sh @@ -2,12 +2,3 @@ export METPLUS_TEST_INPUT_BASE=/d3/projects/MET/METplus_Data export METPLUS_TEST_OUTPUT_BASE=/d3/personal/${USER}/pytest export METPLUS_TEST_MET_INSTALL_DIR=/d3/projects/MET/MET_releases/met-9.1_beta3 export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp - -export METPLUS_TEST_EXE_WGRIB2=/usr/local/bin/wgrib2 -export METPLUS_TEST_EXE_CUT=/usr/bin/cut -export METPLUS_TEST_EXE_TR=/usr/bin/tr -export METPLUS_TEST_EXE_RM=/bin/rm -export METPLUS_TEST_EXE_NCAP2=/usr/local/nco/bin/ncap2 -export METPLUS_TEST_EXE_CONVERT=/usr/bin/convert -export METPLUS_TEST_EXE_NCDUMP=/usr/local/bin/ncdump -export METPLUS_TEST_EXE_EGREP=/bin/egrep diff --git a/internal/tests/pytests/minimum_pytest.eyewall.sh b/internal/tests/pytests/minimum_pytest.eyewall.sh index b2a8a99753..06a69dd650 100644 --- a/internal/tests/pytests/minimum_pytest.eyewall.sh +++ b/internal/tests/pytests/minimum_pytest.eyewall.sh @@ -3,12 +3,3 @@ export METPLUS_TEST_OUTPUT_BASE=/d1/${USER}/pytest export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met-9.0 #export METPLUS_TEST_MET_INSTALL_DIR=/d1/CODE/MET/MET_releases/met-9.0_beta4 export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp - -export METPLUS_TEST_EXE_WGRIB2=/usr/local/bin/wgrib2 -export METPLUS_TEST_EXE_CUT=/usr/bin/cut -export METPLUS_TEST_EXE_TR=/usr/bin/tr -export METPLUS_TEST_EXE_RM=/bin/rm -export METPLUS_TEST_EXE_NCAP2=/usr/local/nco/bin/ncap2 -export METPLUS_TEST_EXE_CONVERT=/usr/bin/convert -export METPLUS_TEST_EXE_NCDUMP=/usr/local/bin/ncdump -export METPLUS_TEST_EXE_EGREP=/bin/egrep diff --git a/internal/tests/pytests/minimum_pytest.hera.sh b/internal/tests/pytests/minimum_pytest.hera.sh index 64407e59a0..bfb541180d 100644 --- a/internal/tests/pytests/minimum_pytest.hera.sh +++ b/internal/tests/pytests/minimum_pytest.hera.sh @@ -2,12 +2,3 @@ export METPLUS_TEST_INPUT_BASE=/home/${USER}/metplus_pytests export METPLUS_TEST_OUTPUT_BASE=/home/${USER}/metplus_pytests/out export 
METPLUS_TEST_MET_INSTALL_DIR=/contrib/met/8.1 export METPLUS_TEST_TMP_DIR=/tmp - -export METPLUS_TEST_EXE_WGRIB2=/apps/wgrib2/2.0.8/intel/18.0.3.222/bin/wgrib2 -export METPLUS_TEST_EXE_CUT=/usr/bin/cut -export METPLUS_TEST_EXE_TR=/usr/bin/tr -export METPLUS_TEST_EXE_RM=/usr/bin/rm -export METPLUS_TEST_EXE_NCAP2=/apps/nco/4.7.0/intel/18.0.3.051/bin/ncap2 -export METPLUS_TEST_EXE_CONVERT=/usr/bin/convert -export METPLUS_TEST_EXE_NCDUMP=/apps/netcdf/4.7.0/intel/18.0.5.274/bin/ncdump -export METPLUS_TEST_EXE_EGREP=/usr/bin/grep diff --git a/internal/tests/pytests/minimum_pytest.kiowa.sh b/internal/tests/pytests/minimum_pytest.kiowa.sh index 655f80f2d0..889bf4cce9 100644 --- a/internal/tests/pytests/minimum_pytest.kiowa.sh +++ b/internal/tests/pytests/minimum_pytest.kiowa.sh @@ -3,12 +3,3 @@ export METPLUS_TEST_OUTPUT_BASE=/d1/personal/${USER}/pytest export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met-9.0 #export METPLUS_TEST_MET_INSTALL_DIR=/d1/projects/MET/MET_releases/met-9.0_beta4 export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp -#export METPLUS_TEST_TMP_DIR=/tmp -export METPLUS_TEST_EXE_WGRIB2=/usr/local/bin/wgrib2 -export METPLUS_TEST_EXE_CUT=/usr/bin/cut -export METPLUS_TEST_EXE_TR=/usr/bin/tr -export METPLUS_TEST_EXE_RM=/bin/rm -export METPLUS_TEST_EXE_NCAP2=/usr/local/nco/bin/ncap2 -export METPLUS_TEST_EXE_CONVERT=/usr/bin/convert -export METPLUS_TEST_EXE_NCDUMP=/usr/local/bin/ncdump -export METPLUS_TEST_EXE_EGREP=/bin/egrep diff --git a/internal/tests/pytests/minimum_pytest.venus.sh b/internal/tests/pytests/minimum_pytest.venus.sh index 493f861ff1..2c4774e348 100644 --- a/internal/tests/pytests/minimum_pytest.venus.sh +++ b/internal/tests/pytests/minimum_pytest.venus.sh @@ -2,12 +2,3 @@ export METPLUS_TEST_INPUT_BASE=/gpfs/dell2/emc/verification/noscrub/$USER/METplu export METPLUS_TEST_OUTPUT_BASE=/gpfs/dell2/emc/verification/noscrub/$USER/metplus_test export METPLUS_TEST_MET_INSTALL_DIR=/gpfs/dell2/emc/verification/noscrub/$USER/met/9.0_beta4 export 
METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp - -export METPLUS_TEST_EXE_WGRIB2=$WGRIB2 -export METPLUS_TEST_EXE_CUT=cut -export METPLUS_TEST_EXE_TR=tr -export METPLUS_TEST_EXE_RM=rm -export METPLUS_TEST_EXE_NCAP2=ncap2 -export METPLUS_TEST_EXE_CONVERT=convert -export METPLUS_TEST_EXE_NCDUMP=ncdump -export METPLUS_TEST_EXE_EGREP=egrep From beedf87aafd0eb1af160e0db3fa59d465abdd2cb Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 09:43:54 -0600 Subject: [PATCH 81/92] use latest release of MET for tests --- internal/tests/pytests/minimum_pytest.kiowa.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/tests/pytests/minimum_pytest.kiowa.sh b/internal/tests/pytests/minimum_pytest.kiowa.sh index 889bf4cce9..33cb80aa93 100644 --- a/internal/tests/pytests/minimum_pytest.kiowa.sh +++ b/internal/tests/pytests/minimum_pytest.kiowa.sh @@ -1,5 +1,5 @@ export METPLUS_TEST_INPUT_BASE=/d1/projects/METplus/METplus_Data export METPLUS_TEST_OUTPUT_BASE=/d1/personal/${USER}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met-9.0 +export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met #export METPLUS_TEST_MET_INSTALL_DIR=/d1/projects/MET/MET_releases/met-9.0_beta4 export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp From 5057f23aadeebb600a175a7f106c6d7f80d45fef Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 10:20:33 -0600 Subject: [PATCH 82/92] fixed path to generate code coverage report --- .github/actions/run_tests/entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/run_tests/entrypoint.sh b/.github/actions/run_tests/entrypoint.sh index 7dda2d2133..3bfad88343 100644 --- a/.github/actions/run_tests/entrypoint.sh +++ b/.github/actions/run_tests/entrypoint.sh @@ -61,7 +61,7 @@ if [[ "$INPUT_CATEGORIES" == pytests* ]]; then for x in `cat $PYTESTS_GROUPS_FILEPATH`; do 
marker="${x//_or_/ or }" marker="${marker//not_/not }" - command+="/usr/local/envs/pytest/bin/pytest -vv --cov=../../metplus -m \"$marker\"" + command+="/usr/local/envs/pytest/bin/pytest -vv --cov=../../../metplus -m \"$marker\"" command+=";if [ \$? != 0 ]; then status=1; fi;" done command+="if [ \$status != 0 ]; then echo ERROR: Some pytests failed. Search for FAILED to review; false; fi" From 404f034a118230e51fce9ff40658f98b6d21b1c2 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 10:23:28 -0600 Subject: [PATCH 83/92] Changed directory to write test output to a directory called test_output under METPLUS_TEST_OUTPUT_BASE and added logic to remove that directory before running tests. This prevents false success of tests that generate files and checks if they exist if they were generated from a previous successful run. Write to directory with hard-coded name to prevent accidentally scrubbing other files if the env var is not set properly --- internal/tests/pytests/conftest.py | 19 +++++++++++++++++-- internal/tests/pytests/minimum_pytest.conf | 2 +- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index 97af766988..7b0f9a1c35 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -4,6 +4,7 @@ import subprocess import pytest import getpass +import shutil from pathlib import Path # add METplus directory to path so the wrappers and utilities can be found @@ -19,7 +20,8 @@ if pytest_host is None: import socket pytest_host = socket.gethostname() - print(f"No hostname provided with METPLUS_PYTEST_HOST, using {pytest_host}") + print("No hostname provided with METPLUS_PYTEST_HOST, " + f"using {pytest_host}") else: print(f"METPLUS_PYTEST_HOST = {pytest_host}") @@ -33,7 +35,8 @@ # source minimum_pytest..sh script current_user = getpass.getuser() -command = shlex.split(f"env -i bash -c 
'export USER={current_user} && source {minimum_pytest_file} && env'") +command = shlex.split(f"env -i bash -c 'export USER={current_user} && " + f"source {minimum_pytest_file} && env'") proc = subprocess.Popen(command, stdout=subprocess.PIPE) for line in proc.stdout: @@ -43,6 +46,18 @@ proc.communicate() +output_base = os.environ['METPLUS_TEST_OUTPUT_BASE'] +if not output_base: + print('ERROR: METPLUS_TEST_OUTPUT_BASE must be set to a path to write') + sys.exit(1) + +test_output_dir = os.path.join(output_base, 'test_output') +print(f'Test output dir is {test_output_dir}') +if os.path.exists(test_output_dir): + print(f'Removing test output dir: {test_output_dir}') + shutil.rmtree(test_output_dir) + + @pytest.fixture(scope='function') def metplus_config(): """! Create a METplus configuration object that can be diff --git a/internal/tests/pytests/minimum_pytest.conf b/internal/tests/pytests/minimum_pytest.conf index 5a68934956..0a54aec6d3 100644 --- a/internal/tests/pytests/minimum_pytest.conf +++ b/internal/tests/pytests/minimum_pytest.conf @@ -1,6 +1,6 @@ [config] INPUT_BASE = {ENV[METPLUS_TEST_INPUT_BASE]} -OUTPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]} +OUTPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]}/test_output MET_INSTALL_DIR = {ENV[METPLUS_TEST_MET_INSTALL_DIR]} TMP_DIR = {ENV[METPLUS_TEST_TMP_DIR]} From 47b5a2f6d875efdd8865db35e0c7289eac594cbc Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 10:24:32 -0600 Subject: [PATCH 84/92] per dtcenter/METplus-Internal#24, added tests to ensure that run_metplus.py script returns an expected failure return code --- .github/parm/pytest_groups.txt | 1 + internal/tests/pytests/pytest.ini | 1 + .../pytests/run_metplus/test_run_metplus.py | 48 +++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 internal/tests/pytests/run_metplus/test_run_metplus.py diff --git a/.github/parm/pytest_groups.txt b/.github/parm/pytest_groups.txt index 
374b99da80..fda02d7394 100644 --- a/.github/parm/pytest_groups.txt +++ b/.github/parm/pytest_groups.txt @@ -1,3 +1,4 @@ +run_metplus util wrapper wrapper_a diff --git a/internal/tests/pytests/pytest.ini b/internal/tests/pytests/pytest.ini index 8630509ec0..140a898f8e 100644 --- a/internal/tests/pytests/pytest.ini +++ b/internal/tests/pytests/pytest.ini @@ -1,5 +1,6 @@ [pytest] markers = + run_metplus: custom marker for testing run_metplus.py script util: custom marker for testing metplus/util logic wrapper_a: custom marker for testing metplus/wrapper logic - A group wrapper_b: custom marker for testing metplus/wrapper logic - B group diff --git a/internal/tests/pytests/run_metplus/test_run_metplus.py b/internal/tests/pytests/run_metplus/test_run_metplus.py new file mode 100644 index 0000000000..61ad30e84e --- /dev/null +++ b/internal/tests/pytests/run_metplus/test_run_metplus.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import pytest + +from pathlib import Path +import os +from subprocess import run + +# get METplus directory relative to this file +# from this script's directory, go up 4 directories +METPLUS_DIR = str(Path(__file__).parents[4]) +RUN_METPLUS = os.path.join(METPLUS_DIR, 'ush', 'run_metplus.py') +EXAMPLE_CONF = os.path.join(METPLUS_DIR, 'parm', 'use_cases', + 'met_tool_wrapper', 'Example', 'Example.conf') +MINIMUM_CONF = os.path.join(METPLUS_DIR, 'internal', 'tests', 'pytests', + 'minimum_pytest.conf') +TEST_OUTPUT_DIR = os.path.join(os.environ['METPLUS_TEST_OUTPUT_BASE'], + 'test_output') + + +@pytest.mark.run_metplus +def test_run_metplus_exists(): + """! 
Check that run_metplus.py script exists """ + assert os.path.exists(RUN_METPLUS) + + +@pytest.mark.parametrize( + 'command, expected_return_code', [ + ([RUN_METPLUS], 2), # 0 - no arguments, failure + ([RUN_METPLUS, EXAMPLE_CONF], 2), # 1 - minimum conf unset, failure + ([RUN_METPLUS, EXAMPLE_CONF, MINIMUM_CONF], 0), # 2 - success + ] +) +@pytest.mark.run_metplus +def test_run_metplus_check_return_code(command, expected_return_code): + """! Call run_metplus.py with various arguments and check that the + expected value is returned by the script. A successful run should return + 0 and a failed run should return a non-zero return code, typically 2. + """ + process = run(command) + assert process.returncode == expected_return_code + + +@pytest.mark.run_metplus +def test_output_dir_is_created(): + """! Check that the test output directory was created after running tests + """ + assert os.path.exists(TEST_OUTPUT_DIR) From 5547117df2b4404161358461430b3f6372dcac96 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 12:02:38 -0600 Subject: [PATCH 85/92] save changes --- internal/tests/pytests/conftest.py | 8 +++-- internal/tests/pytests/minimum_pytest.conf | 2 +- .../pytests/run_metplus/test_run_metplus.py | 13 +++++--- .../ascii2nc/test_ascii2nc_wrapper.py | 32 ++++++++++++------- 4 files changed, 36 insertions(+), 19 deletions(-) diff --git a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index 7b0f9a1c35..df7496f4b0 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -52,7 +52,6 @@ sys.exit(1) test_output_dir = os.path.join(output_base, 'test_output') -print(f'Test output dir is {test_output_dir}') if os.path.exists(test_output_dir): print(f'Removing test output dir: {test_output_dir}') shutil.rmtree(test_output_dir) @@ -70,8 +69,11 @@ def read_configs(extra_configs=[]): script_dir = os.path.dirname(__file__) minimum_conf = 
os.path.join(script_dir, 'minimum_pytest.conf') args = [minimum_conf] - if extra_configs: - args.extend(extra_configs) + for extra_config in extra_configs: + if extra_config.startswith('use_cases'): + args.append(os.path.join(metplus_dir, 'parm', extra_config)) + elif extra_config: + args.append(extra_config) config = config_metplus.setup(args) return config diff --git a/internal/tests/pytests/minimum_pytest.conf b/internal/tests/pytests/minimum_pytest.conf index 0a54aec6d3..9982acc1a4 100644 --- a/internal/tests/pytests/minimum_pytest.conf +++ b/internal/tests/pytests/minimum_pytest.conf @@ -1,6 +1,6 @@ [config] INPUT_BASE = {ENV[METPLUS_TEST_INPUT_BASE]} -OUTPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]}/test_output +OUTPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]}/test_output/{RUN_ID} MET_INSTALL_DIR = {ENV[METPLUS_TEST_MET_INSTALL_DIR]} TMP_DIR = {ENV[METPLUS_TEST_TMP_DIR]} diff --git a/internal/tests/pytests/run_metplus/test_run_metplus.py b/internal/tests/pytests/run_metplus/test_run_metplus.py index 61ad30e84e..6567e2d8e7 100644 --- a/internal/tests/pytests/run_metplus/test_run_metplus.py +++ b/internal/tests/pytests/run_metplus/test_run_metplus.py @@ -4,6 +4,7 @@ from pathlib import Path import os +import shutil from subprocess import run # get METplus directory relative to this file @@ -16,7 +17,8 @@ 'minimum_pytest.conf') TEST_OUTPUT_DIR = os.path.join(os.environ['METPLUS_TEST_OUTPUT_BASE'], 'test_output') - +NEW_OUTPUT_BASE = os.path.join(TEST_OUTPUT_DIR, 'run_metplus') +OUTPUT_BASE_OVERRIDE = f"config.OUTPUT_BASE={NEW_OUTPUT_BASE}" @pytest.mark.run_metplus def test_run_metplus_exists(): @@ -26,9 +28,9 @@ def test_run_metplus_exists(): @pytest.mark.parametrize( 'command, expected_return_code', [ - ([RUN_METPLUS], 2), # 0 - no arguments, failure - ([RUN_METPLUS, EXAMPLE_CONF], 2), # 1 - minimum conf unset, failure - ([RUN_METPLUS, EXAMPLE_CONF, MINIMUM_CONF], 0), # 2 - success + ([RUN_METPLUS], 2), + ([RUN_METPLUS, EXAMPLE_CONF], 2), + ([RUN_METPLUS, 
EXAMPLE_CONF, MINIMUM_CONF, OUTPUT_BASE_OVERRIDE], 0), ] ) @pytest.mark.run_metplus @@ -40,6 +42,9 @@ def test_run_metplus_check_return_code(command, expected_return_code): process = run(command) assert process.returncode == expected_return_code + if os.path.exists(NEW_OUTPUT_BASE): + shutil.rmtree(NEW_OUTPUT_BASE) + @pytest.mark.run_metplus def test_output_dir_is_created(): diff --git a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py index f492f1f349..ace63c816d 100644 --- a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py @@ -3,17 +3,20 @@ import pytest import os +import shutil from metplus.wrappers.ascii2nc_wrapper import ASCII2NCWrapper def ascii2nc_wrapper(metplus_config, config_path=None, config_overrides=None): - config = metplus_config() - - if config_path: - parm_base = config.getdir('PARM_BASE') - config_full_path = os.path.join(parm_base, config_path) - config = metplus_config([config_full_path]) + config = metplus_config([config_path]) + # config = metplus_config() + # + # if config_path: + # config = metplus_config([config_path]) + # parm_base = config.getdir('PARM_BASE') + # config_full_path = os.path.join(parm_base, config_path) + # config = metplus_config([config_full_path]) overrides = {'DO_NOT_RUN_EXE': True, 'INPUT_MUST_EXIST': False} @@ -27,8 +30,7 @@ def ascii2nc_wrapper(metplus_config, config_path=None, config_overrides=None): for key, value in overrides.items(): config.set(instance, key, value) - return ASCII2NCWrapper(config, - instance=instance) + return ASCII2NCWrapper(config, instance=instance) @pytest.mark.parametrize( @@ -171,7 +173,7 @@ def test_ascii2nc_wrapper(metplus_config, config_overrides, f"-config {config_file} " f"{verbosity}") - assert(all_commands[0][0] == expected_cmd) + assert all_commands[0][0] == expected_cmd env_vars = all_commands[0][1] # check 
that environment variables were set properly @@ -182,10 +184,14 @@ def test_ascii2nc_wrapper(metplus_config, config_overrides, for env_var_key in env_var_keys: match = next((item for item in env_vars if item.startswith(env_var_key)), None) - assert (match is not None) + assert match is not None value = match.split('=', 1)[1] - assert (env_var_values.get(env_var_key, '') == value) + assert env_var_values.get(env_var_key, '') == value + + #output_base = wrapper.config.getdir('OUTPUT_BASE') + #if output_base: + # shutil.rmtree(output_base) @pytest.mark.wrapper @@ -200,3 +206,7 @@ def test_get_config_file(metplus_config): config.set('config', 'ASCII2NC_CONFIG_FILE', fake_config_name) wrapper = ASCII2NCWrapper(config) assert wrapper.c_dict['CONFIG_FILE'] == fake_config_name + + #output_base = wrapper.config.getdir('OUTPUT_BASE') + #if output_base: + # shutil.rmtree(output_base) From 3c760f80040cb4d4b4963a9687674fd3b7c1f4c6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 13:01:05 -0600 Subject: [PATCH 86/92] Modified pytest fixture logic so that tests that create a minimum config file will automatically scrub the test output including logs and final conf only if the test succeeds. This makes it much easier to review test output to investigate issues. Tests that currently read in additional configuration files now call the metplus_config_files fixture to preserve the old behavior but those cases will not automatically clean up the output data. 
Eventually update those tests to set the config settings in the test instead of reading a separate file so that all tests that use a METplusConfig object will be scrubbed unless they fail --- internal/tests/pytests/conftest.py | 35 +++++++++++++- .../tests/pytests/util/config/test_config.py | 29 +++++------ .../config_metplus/test_config_metplus.py | 48 +++++++++---------- .../pytests/util/logging/test_logging.py | 8 ++-- .../util/met_config/test_met_config.py | 6 +-- .../pytests/util/met_util/test_met_util.py | 28 +++++------ .../util/time_looping/test_time_looping.py | 14 +++--- .../ascii2nc/test_ascii2nc_wrapper.py | 28 ++++------- .../command_builder/test_command_builder.py | 48 +++++++++---------- .../compare_gridded/test_compare_gridded.py | 2 +- .../test_ensemble_stat_wrapper.py | 10 ++-- .../extract_tiles/test_extract_tiles.py | 40 ++++++++-------- .../gen_ens_prod/test_gen_ens_prod_wrapper.py | 6 +-- .../wrappers/gen_vx_mask/test_gen_vx_mask.py | 2 +- .../wrappers/grid_diag/test_grid_diag.py | 6 +-- .../grid_stat/test_grid_stat_wrapper.py | 8 ++-- .../wrappers/ioda2nc/test_ioda2nc_wrapper.py | 4 +- .../wrappers/mode/test_mode_wrapper.py | 8 ++-- .../pytests/wrappers/mtd/test_mtd_wrapper.py | 4 +- .../wrappers/pb2nc/test_pb2nc_wrapper.py | 22 ++++----- .../pcp_combine/test_pcp_combine_wrapper.py | 45 ++++++++--------- .../test_plot_point_obs_wrapper.py | 4 +- .../wrappers/point2grid/test_point2grid.py | 2 +- .../point_stat/test_point_stat_wrapper.py | 6 +-- .../test_regrid_data_plane.py | 6 +-- .../runtime_freq/test_runtime_freq.py | 2 +- .../series_analysis/test_series_analysis.py | 30 +++++++++--- .../stat_analysis/test_stat_analysis.py | 16 +++---- .../wrappers/tc_gen/test_tc_gen_wrapper.py | 4 +- .../tc_pairs/test_tc_pairs_wrapper.py | 16 +++---- .../wrappers/tc_stat/test_tc_stat_wrapper.py | 18 +++---- .../wrappers/user_script/test_user_script.py | 2 +- 32 files changed, 272 insertions(+), 235 deletions(-) diff --git 
a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index df7496f4b0..d7084af9fa 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -58,13 +58,13 @@ @pytest.fixture(scope='function') -def metplus_config(): +def metplus_config_files(): """! Create a METplus configuration object that can be manipulated/modified to reflect different paths, directories, values, etc. for individual tests. """ - def read_configs(extra_configs=[]): + def read_configs(extra_configs): # Read in minimum pytest config file and any other extra configs script_dir = os.path.dirname(__file__) minimum_conf = os.path.join(script_dir, 'minimum_pytest.conf') @@ -79,3 +79,34 @@ def read_configs(extra_configs=[]): return config return read_configs + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + # execute all other hooks to obtain the report object + outcome = yield + rep = outcome.get_result() + + # set a report attribute for each phase of a call, which can + # be "setup", "call", "teardown" + + setattr(item, "rep_" + rep.when, rep) + +#@pytest.fixture(scope='function') +@pytest.fixture() +def metplus_config(request): + """! Create a METplus configuration object that can be + manipulated/modified to + reflect different paths, directories, values, etc. for individual + tests. 
+ """ + script_dir = os.path.dirname(__file__) + args = [os.path.join(script_dir, 'minimum_pytest.conf')] + config = config_metplus.setup(args) + yield config + + # don't remove output base if test fails + if request.node.rep_call.failed: + return + config_output_base = config.getdir('OUTPUT_BASE') + if config_output_base and os.path.exists(config_output_base): + shutil.rmtree(config_output_base) diff --git a/internal/tests/pytests/util/config/test_config.py b/internal/tests/pytests/util/config/test_config.py index 7c054ab3d8..2d0a927f49 100644 --- a/internal/tests/pytests/util/config/test_config.py +++ b/internal/tests/pytests/util/config/test_config.py @@ -28,7 +28,7 @@ ) @pytest.mark.util def test_getseconds(metplus_config, input_value, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_SECONDS', input_value) @@ -57,7 +57,7 @@ def test_getseconds(metplus_config, input_value, result): ) @pytest.mark.util def test_getstr(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_GETSTR', input_value) @@ -82,7 +82,7 @@ def test_getstr(metplus_config, input_value, default, result): ) @pytest.mark.util def test_getdir(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_GETDIR', input_value) @@ -110,7 +110,7 @@ def test_getdir(metplus_config, input_value, default, result): ) @pytest.mark.util def test_getraw(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config conf.set('config', 'TEST_EXTRA', 'extra') conf.set('config', 'TEST_EXTRA2', '{TEST_EXTRA}_extra') @@ -144,7 +144,7 @@ def test_getraw(metplus_config, input_value, default, result): ) @pytest.mark.util def test_getbool(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config if 
input_value is not None: conf.set('config', 'TEST_GETBOOL', input_value) @@ -167,7 +167,7 @@ def test_getbool(metplus_config, input_value, default, result): ) @pytest.mark.util def test_getexe(metplus_config, input_value, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_GETEXE', input_value) @@ -189,7 +189,7 @@ def test_getexe(metplus_config, input_value, result): ] ) def test_getfloat(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_GETFLOAT', input_value) @@ -220,7 +220,7 @@ def test_getfloat(metplus_config, input_value, default, result): ) @pytest.mark.util def test_getint(metplus_config, input_value, default, result): - conf = metplus_config() + conf = metplus_config if input_value is not None: conf.set('config', 'TEST_GETINT', input_value) @@ -241,14 +241,15 @@ def test_getint(metplus_config, input_value, default, result): ] ) @pytest.mark.util -def test_move_all_to_config_section(metplus_config, config_key, expected_result): +def test_move_all_to_config_section(metplus_config_files, config_key, + expected_result): config_files = ['config_1.conf', 'config_2.conf', 'config_3.conf', ] test_dir = os.path.dirname(__file__) config_files = [os.path.join(test_dir, item) for item in config_files] - config = metplus_config(config_files) + config = metplus_config_files(config_files) assert config.getstr('config', config_key) == expected_result @@ -280,9 +281,9 @@ def test_move_all_to_config_section(metplus_config, config_key, expected_result) ] ) @pytest.mark.util -def test_move_all_to_config_section_cmd_line(metplus_config, overrides, +def test_move_all_to_config_section_cmd_line(metplus_config_files, overrides, config_key, expected_result): - config = metplus_config(overrides) + config = metplus_config_files(overrides) assert config.getstr('config', config_key, '') == expected_result @@ -330,13 +331,13 @@ 
def test_move_all_to_config_section_cmd_line(metplus_config, overrides, ] ) @pytest.mark.util -def test_getraw_nested_curly_braces(metplus_config, +def test_getraw_nested_curly_braces(metplus_config_files, config_name, expected_result): config_files = ['config_1.conf', ] test_dir = os.path.dirname(__file__) config_files = [os.path.join(test_dir, item) for item in config_files] - config = metplus_config(config_files) + config = metplus_config_files(config_files) sec, name = config_name.split('.', 1) assert config.getraw(sec, name) == expected_result diff --git a/internal/tests/pytests/util/config_metplus/test_config_metplus.py b/internal/tests/pytests/util/config_metplus/test_config_metplus.py index 8332aba14c..befaa25316 100644 --- a/internal/tests/pytests/util/config_metplus/test_config_metplus.py +++ b/internal/tests/pytests/util/config_metplus/test_config_metplus.py @@ -72,7 +72,7 @@ def test_get_default_config_list(): @pytest.mark.util def test_find_indices_in_config_section(metplus_config, regex, index, id, expected_result): - config = metplus_config() + config = metplus_config config.set('config', 'FCST_VAR1_NAME', 'name1') config.set('config', 'FCST_VAR1_LEVELS', 'level1') config.set('config', 'FCST_VAR2_NAME', 'name2') @@ -118,7 +118,7 @@ def test_find_indices_in_config_section(metplus_config, regex, index, ) @pytest.mark.util def test_get_custom_string_list(metplus_config, conf_items, met_tool, expected_result): - config = metplus_config() + config = metplus_config for conf_key, conf_value in conf_items.items(): config.set('config', conf_key, conf_value) @@ -146,7 +146,7 @@ def test_find_var_indices_fcst(metplus_config, config_var_name, expected_indices, set_met_tool): - config = metplus_config() + config = metplus_config data_types = ['FCST'] config.set('config', config_var_name, "NAME1") met_tool = 'grid_stat' if set_met_tool else None @@ -229,7 +229,7 @@ def test_get_field_search_prefixes(data_type, met_tool, expected_out): ) @pytest.mark.util def 
test_is_var_item_valid(metplus_config, item_list, extension, is_valid): - conf = metplus_config() + conf = metplus_config assert config_metplus.is_var_item_valid(item_list, '1', extension, conf)[0] == is_valid @@ -272,7 +272,7 @@ def test_is_var_item_valid(metplus_config, item_list, extension, is_valid): ) @pytest.mark.util def test_is_var_item_valid_levels(metplus_config, item_list, configs_to_set, is_valid): - conf = metplus_config() + conf = metplus_config for key, value in configs_to_set.items(): conf.set('config', key, value) @@ -321,7 +321,7 @@ def test_get_field_config_variables(metplus_config, search_prefixes, config_overrides, expected_value): - config = metplus_config() + config = metplus_config index = '1' field_info_types = ['name', 'levels', 'thresh', 'options', 'output_names'] for field_info_type in field_info_types: @@ -388,7 +388,7 @@ def test_get_field_config_variables_synonyms(metplus_config, config_keys, field_key, expected_value): - config = metplus_config() + config = metplus_config index = '1' prefix = 'BOTH_REGRID_DATA_PLANE_' for key in config_keys: @@ -411,7 +411,7 @@ def test_get_field_config_variables_synonyms(metplus_config, ) @pytest.mark.util def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): - conf = metplus_config() + conf = metplus_config conf.set('config', 'FCST_VAR1_NAME', "NAME1") conf.set('config', 'FCST_VAR1_LEVELS', "LEVELS11, LEVELS12") conf.set('config', 'FCST_VAR2_NAME', "NAME2") @@ -448,7 +448,7 @@ def test_parse_var_list_fcst_only(metplus_config, data_type, list_created): ) @pytest.mark.util def test_parse_var_list_obs(metplus_config, data_type, list_created): - conf = metplus_config() + conf = metplus_config conf.set('config', 'OBS_VAR1_NAME', "NAME1") conf.set('config', 'OBS_VAR1_LEVELS', "LEVELS11, LEVELS12") conf.set('config', 'OBS_VAR2_NAME', "NAME2") @@ -485,7 +485,7 @@ def test_parse_var_list_obs(metplus_config, data_type, list_created): ) @pytest.mark.util def 
test_parse_var_list_both(metplus_config, data_type, list_created): - conf = metplus_config() + conf = metplus_config conf.set('config', 'BOTH_VAR1_NAME', "NAME1") conf.set('config', 'BOTH_VAR1_LEVELS', "LEVELS11, LEVELS12") conf.set('config', 'BOTH_VAR2_NAME', "NAME2") @@ -512,7 +512,7 @@ def test_parse_var_list_both(metplus_config, data_type, list_created): # field info defined in both FCST_* and OBS_* variables @pytest.mark.util def test_parse_var_list_fcst_and_obs(metplus_config): - conf = metplus_config() + conf = metplus_config conf.set('config', 'FCST_VAR1_NAME', "FNAME1") conf.set('config', 'FCST_VAR1_LEVELS', "FLEVELS11, FLEVELS12") conf.set('config', 'FCST_VAR2_NAME', "FNAME2") @@ -549,7 +549,7 @@ def test_parse_var_list_fcst_and_obs(metplus_config): # VAR1 defined by FCST, VAR2 defined by OBS @pytest.mark.util def test_parse_var_list_fcst_and_obs_alternate(metplus_config): - conf = metplus_config() + conf = metplus_config conf.set('config', 'FCST_VAR1_NAME', "FNAME1") conf.set('config', 'FCST_VAR1_LEVELS', "FLEVELS11, FLEVELS12") conf.set('config', 'OBS_VAR2_NAME', "ONAME2") @@ -569,7 +569,7 @@ def test_parse_var_list_fcst_and_obs_alternate(metplus_config): ) @pytest.mark.util def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_len, name_levels): - conf = metplus_config() + conf = metplus_config conf.set('config', 'OBS_VAR1_NAME', "ONAME1") conf.set('config', 'OBS_VAR1_LEVELS', "OLEVELS11, OLEVELS12") conf.set('config', 'FCST_VAR2_NAME', "FNAME2") @@ -619,7 +619,7 @@ def test_parse_var_list_fcst_and_obs_and_both(metplus_config, data_type, list_le ) @pytest.mark.util def test_parse_var_list_fcst_only_options(metplus_config, data_type, list_len): - conf = metplus_config() + conf = metplus_config conf.set('config', 'FCST_VAR1_NAME', "NAME1") conf.set('config', 'FCST_VAR1_LEVELS', "LEVELS11, LEVELS12") conf.set('config', 'FCST_VAR1_THRESH', ">1, >2") @@ -643,7 +643,7 @@ def test_parse_var_list_fcst_only_options(metplus_config, 
data_type, list_len): ) @pytest.mark.util def test_find_var_indices_wrapper_specific(metplus_config, met_tool, indices): - conf = metplus_config() + conf = metplus_config data_type = 'FCST' conf.set('config', f'{data_type}_VAR1_NAME', "NAME1") conf.set('config', f'{data_type}_GRID_STAT_VAR2_NAME', "GSNAME2") @@ -659,7 +659,7 @@ def test_find_var_indices_wrapper_specific(metplus_config, met_tool, indices): # works as expected @pytest.mark.util def test_parse_var_list_ensemble(metplus_config): - config = metplus_config() + config = metplus_config config.set('config', 'ENS_VAR1_NAME', 'APCP') config.set('config', 'ENS_VAR1_LEVELS', 'A24') config.set('config', 'ENS_VAR1_THRESH', '>0.0, >=10.0') @@ -750,7 +750,7 @@ def test_parse_var_list_ensemble(metplus_config): @pytest.mark.util def test_parse_var_list_series_by(metplus_config): - config = metplus_config() + config = metplus_config config.set('config', 'BOTH_EXTRACT_TILES_VAR1_NAME', 'RH') config.set('config', 'BOTH_EXTRACT_TILES_VAR1_LEVELS', 'P850, P700') config.set('config', 'BOTH_EXTRACT_TILES_VAR1_OUTPUT_NAMES', @@ -838,7 +838,7 @@ def test_parse_var_list_priority_fcst(metplus_config): # process again until all items have been popped. 
# This will check that list is in priority order while(priority_list): - config = metplus_config() + config = metplus_config for key in priority_list: config.set('config', key, key.lower()) @@ -856,7 +856,7 @@ def test_parse_var_list_priority_fcst(metplus_config): # wrapper specific field info variables are specified @pytest.mark.util def test_parse_var_list_wrapper_specific(metplus_config): - conf = metplus_config() + conf = metplus_config conf.set('config', 'FCST_VAR1_NAME', "ENAME1") conf.set('config', 'FCST_VAR1_LEVELS', "ELEVELS11, ELEVELS12") conf.set('config', 'FCST_VAR2_NAME', "ENAME2") @@ -942,7 +942,7 @@ def test_parse_var_list_wrapper_specific(metplus_config): @pytest.mark.util def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, expected_results): - config = metplus_config() + config = metplus_config for key, value in config_overrides.items(): config.set('config', key, value) @@ -999,7 +999,7 @@ def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides, ) @pytest.mark.util def test_get_process_list(metplus_config, input_list, expected_list): - conf = metplus_config() + conf = metplus_config conf.set('config', 'PROCESS_LIST', input_list) process_list = config_metplus.get_process_list(conf) output_list = [item[0] for item in process_list] @@ -1033,7 +1033,7 @@ def test_get_process_list(metplus_config, input_list, expected_list): ) @pytest.mark.util def test_get_process_list_instances(metplus_config, input_list, expected_list): - conf = metplus_config() + conf = metplus_config conf.set('config', 'PROCESS_LIST', input_list) output_list = config_metplus.get_process_list(conf) assert output_list == expected_list @@ -1044,7 +1044,7 @@ def test_getraw_sub_and_nosub(metplus_config): raw_string = '{MODEL}_{CURRENT_FCST_NAME}' sub_actual = 'FCST_NAME' - config = metplus_config() + config = metplus_config config.set('config', 'MODEL', 'FCST') config.set('config', 'CURRENT_FCST_NAME', 'NAME') config.set('config', 
'OUTPUT_PREFIX', raw_string) @@ -1062,7 +1062,7 @@ def test_getraw_instance_with_unset_var(metplus_config): """ pytest.skip() instance = 'my_section' - config = metplus_config() + config = metplus_config config.set('config', 'MODEL', 'FCST') config.add_section(instance) diff --git a/internal/tests/pytests/util/logging/test_logging.py b/internal/tests/pytests/util/logging/test_logging.py index 68eca3262d..085b33aac9 100644 --- a/internal/tests/pytests/util/logging/test_logging.py +++ b/internal/tests/pytests/util/logging/test_logging.py @@ -10,7 +10,7 @@ @pytest.mark.util def test_log_level(metplus_config): # Verify that the log level is set to what we indicated in the config file. - config = metplus_config() + config = metplus_config fixture_logger = config.logger # Expecting log level = INFO as set in the test config file. level = logging.getLevelName('INFO') @@ -20,7 +20,7 @@ def test_log_level(metplus_config): @pytest.mark.util def test_log_level_key(metplus_config): # Verify that the LOG_LEVEL key is in the config file - config_instance = metplus_config() + config_instance = metplus_config section = 'config' option = 'LOG_LEVEL' assert config_instance.has_option(section, option) @@ -29,7 +29,7 @@ def test_log_level_key(metplus_config): @pytest.mark.util def test_logdir_exists(metplus_config): # Verify that the expected log dir exists. - config = metplus_config() + config = metplus_config log_dir = config.get('config', 'LOG_DIR') # Verify that a logfile exists in the log dir, with a filename # like {LOG_DIR}/metplus.YYYYMMDD.log @@ -40,7 +40,7 @@ def test_logdir_exists(metplus_config): def test_logfile_exists(metplus_config): # Verify that a logfile with format metplus.log exists # We are assuming that there can be numerous files in the log directory. 
- config = metplus_config() + config = metplus_config log_dir = config.get('config', 'LOG_DIR') # Only check for the log file if the log directory is present if os.path.exists(log_dir): diff --git a/internal/tests/pytests/util/met_config/test_met_config.py b/internal/tests/pytests/util/met_config/test_met_config.py index 0f3adb6587..e00e5f2e9b 100644 --- a/internal/tests/pytests/util/met_config/test_met_config.py +++ b/internal/tests/pytests/util/met_config/test_met_config.py @@ -36,7 +36,7 @@ def test_read_climo_field(metplus_config, config_overrides, expected_value): app_name = 'app' for climo_type in ('MEAN', 'STDEV'): expected_var = f'{app_name}_CLIMO_{climo_type}_FIELD'.upper() - config = metplus_config() + config = metplus_config # set config values for key, value in config_overrides.items(): @@ -135,7 +135,7 @@ def test_handle_climo_dict(metplus_config, config_overrides, expected_value): app_name = 'app' for climo_type in ('MEAN', 'STDEV'): expected_var = f'METPLUS_CLIMO_{climo_type}_DICT' - config = metplus_config() + config = metplus_config output_dict = {} # set config values @@ -252,7 +252,7 @@ def test_read_climo_file_name(metplus_config, config_overrides, for climo_type in CLIMO_TYPES: prefix = f'{app_name.upper()}_CLIMO_{climo_type.upper()}_' - config = metplus_config() + config = metplus_config # set config values for key, value in config_overrides.items(): diff --git a/internal/tests/pytests/util/met_util/test_met_util.py b/internal/tests/pytests/util/met_util/test_met_util.py index 481d4f9d46..784d4b0345 100644 --- a/internal/tests/pytests/util/met_util/test_met_util.py +++ b/internal/tests/pytests/util/met_util/test_met_util.py @@ -98,7 +98,7 @@ def test_get_threshold_via_regex(key, value): ) @pytest.mark.util def test_preprocess_file_stage(metplus_config, filename, ext): - conf = metplus_config() + conf = metplus_config metplus_base = conf.getdir('METPLUS_BASE') stage_dir = conf.getdir('STAGING_DIR', os.path.join(conf.getdir('OUTPUT_BASE'), @@ 
-140,7 +140,7 @@ def test_preprocess_file_options(metplus_config, data_type, allow_dir, expected): - config = metplus_config() + config = metplus_config if filename == 'dir': filename = config.getdir('METPLUS_BASE') expected = filename @@ -150,7 +150,7 @@ def test_preprocess_file_options(metplus_config, def test_get_lead_sequence_lead(metplus_config): input_dict = {'valid': datetime.datetime(2019, 2, 1, 13)} - conf = metplus_config() + conf = metplus_config conf.set('config', 'LEAD_SEQ', "3,6,9,12") test_seq = util.get_lead_sequence(conf, input_dict) hour_seq = [] @@ -177,7 +177,7 @@ def test_get_lead_sequence_lead(metplus_config): @pytest.mark.util def test_get_lead_sequence_lead_list(metplus_config, key, value): input_dict = { 'valid' : datetime.datetime(2019, 2, 1, 13) } - conf = metplus_config() + conf = metplus_config conf.set('config', 'LEAD_SEQ', key) test_seq = util.get_lead_sequence(conf, input_dict) hour_seq = [] @@ -222,7 +222,7 @@ def test_get_lead_sequence_lead_list(metplus_config, key, value): ) @pytest.mark.util def test_get_lead_sequence_groups(metplus_config, config_dict, expected_list): - config = metplus_config() + config = metplus_config for key, value in config_dict.items(): config.set('config', key, value) @@ -268,7 +268,7 @@ def test_get_lead_sequence_groups(metplus_config, config_dict, expected_list): @pytest.mark.util def test_get_lead_sequence_init(metplus_config, current_hour, lead_seq): input_dict = {'valid': datetime.datetime(2019, 2, 1, current_hour)} - conf = metplus_config() + conf = metplus_config conf.set('config', 'INIT_SEQ', "0, 12") conf.set('config', 'LEAD_SEQ_MAX', 36) test_seq = util.get_lead_sequence(conf, input_dict) @@ -278,7 +278,7 @@ def test_get_lead_sequence_init(metplus_config, current_hour, lead_seq): @pytest.mark.util def test_get_lead_sequence_init_min_10(metplus_config): input_dict = {'valid': datetime.datetime(2019, 2, 1, 12)} - conf = metplus_config() + conf = metplus_config conf.set('config', 'INIT_SEQ', "0, 
12") conf.set('config', 'LEAD_SEQ_MAX', 24) conf.set('config', 'LEAD_SEQ_MIN', 10) @@ -343,7 +343,7 @@ def test_round_0p5(value, expected_result): ) @pytest.mark.util def test_get_skip_times(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config() + conf = metplus_config conf.set('config', 'SKIP_TIMES', skip_times_conf) assert util.get_skip_times(conf) == expected_dict @@ -362,7 +362,7 @@ def test_get_skip_times(metplus_config, skip_times_conf, expected_dict): ) @pytest.mark.util def test_get_skip_times_wrapper(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config() + conf = metplus_config # set wrapper specific skip times, then ensure it is found conf.set('config', 'GRID_STAT_SKIP_TIMES', skip_times_conf) @@ -383,7 +383,7 @@ def test_get_skip_times_wrapper(metplus_config, skip_times_conf, expected_dict): ) @pytest.mark.util def test_get_skip_times_wrapper_not_used(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config() + conf = metplus_config # set generic SKIP_TIMES, then request grid_stat to ensure it uses generic conf.set('config', 'SKIP_TIMES', skip_times_conf) @@ -485,7 +485,7 @@ def test_subset_list(subset_definition, expected_result): ) @pytest.mark.util def test_get_storm_ids(metplus_config, filename, expected_result): - config = metplus_config() + config = metplus_config filepath = os.path.join(config.getdir('METPLUS_BASE'), 'internal', 'tests', 'data', @@ -514,7 +514,7 @@ def test_get_storm_ids(metplus_config, filename, expected_result): @pytest.mark.util def test_get_storms(metplus_config, filename, expected_result): storm_id_index = 4 - config = metplus_config() + config = metplus_config filepath = os.path.join(config.getdir('METPLUS_BASE'), 'internal', 'tests', 'data', @@ -543,7 +543,7 @@ def test_get_storms_mtd(metplus_config): 'CO001' ] sort_column = 'OBJECT_CAT' - config = metplus_config() + config = metplus_config filepath = os.path.join(config.getdir('METPLUS_BASE'), 'internal', 'tests', 
'data', @@ -644,7 +644,7 @@ def test_format_level(level, expected_result): ) @pytest.mark.util def test_sub_var_list(metplus_config, input_dict, expected_list): - config = metplus_config() + config = metplus_config config.set('config', 'FCST_VAR1_NAME', 'FNAME_{init?fmt=%Y}') config.set('config', 'FCST_VAR1_LEVELS', 'Z{init?fmt=%H}, Z{valid?fmt=%H}') config.set('config', 'OBS_VAR1_NAME', 'ONAME_{init?fmt=%Y}') diff --git a/internal/tests/pytests/util/time_looping/test_time_looping.py b/internal/tests/pytests/util/time_looping/test_time_looping.py index 13ae967c5e..2503ebb71c 100644 --- a/internal/tests/pytests/util/time_looping/test_time_looping.py +++ b/internal/tests/pytests/util/time_looping/test_time_looping.py @@ -11,7 +11,7 @@ def test_get_start_and_end_times(metplus_config): end_time = '2018103109' time_format = '%Y%m%d%H' for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', time_format) config.set('config', f'{prefix}_BEG', start_time) @@ -25,7 +25,7 @@ def test_get_start_and_end_times(metplus_config): def test_get_start_and_end_times_now(metplus_config): time_format = '%Y%m%d%H%M%S' for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', time_format) config.set('config', f'{prefix}_BEG', '{now?fmt=%Y%m%d%H%M%S?shift=-1d}') @@ -43,7 +43,7 @@ def test_get_start_and_end_times_now(metplus_config): def test_get_start_and_end_times_today(metplus_config): time_format = '%Y%m%d' for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', time_format) config.set('config', f'{prefix}_BEG', '{today}') @@ -60,7 +60,7 @@ def test_get_start_and_end_times_today(metplus_config): @pytest.mark.util def 
test_time_generator_list(metplus_config): for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', '%Y%m%d%H') config.set('config', f'{prefix}_LIST', '2021020104, 2021103121') @@ -83,7 +83,7 @@ def test_time_generator_list(metplus_config): @pytest.mark.util def test_time_generator_increment(metplus_config): for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', '%Y%m%d%H') config.set('config', f'{prefix}_BEG', '2021020104') @@ -121,7 +121,7 @@ def test_time_generator_error_check(metplus_config): """ time_fmt = '%Y%m%d%H' for prefix in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config # unset LOOP_BY assert next(time_generator(config)) is None @@ -153,7 +153,7 @@ def test_time_generator_error_check(metplus_config): assert next(time_generator(config))[prefix.lower()] == expected_time # get a fresh config object to test BEG/END configurations - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', prefix) config.set('config', f'{prefix}_TIME_FMT', time_fmt) diff --git a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py index ace63c816d..4065c0dbe2 100644 --- a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py @@ -8,16 +8,8 @@ from metplus.wrappers.ascii2nc_wrapper import ASCII2NCWrapper -def ascii2nc_wrapper(metplus_config, config_path=None, config_overrides=None): - config = metplus_config([config_path]) - # config = metplus_config() - # - # if config_path: - # config = metplus_config([config_path]) - # parm_base = config.getdir('PARM_BASE') - # config_full_path = os.path.join(parm_base, config_path) - # config = 
metplus_config([config_full_path]) - +def ascii2nc_wrapper(metplus_config_files, config_path=None, config_overrides=None): + config = metplus_config_files([config_path]) overrides = {'DO_NOT_RUN_EXE': True, 'INPUT_MUST_EXIST': False} if config_overrides: @@ -142,10 +134,10 @@ def ascii2nc_wrapper(metplus_config, config_path=None, config_overrides=None): ] ) @pytest.mark.wrapper -def test_ascii2nc_wrapper(metplus_config, config_overrides, +def test_ascii2nc_wrapper(metplus_config_files, config_overrides, env_var_values): wrapper = ( - ascii2nc_wrapper(metplus_config, + ascii2nc_wrapper(metplus_config_files, 'use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf', config_overrides) ) @@ -189,15 +181,15 @@ def test_ascii2nc_wrapper(metplus_config, config_overrides, assert env_var_values.get(env_var_key, '') == value - #output_base = wrapper.config.getdir('OUTPUT_BASE') - #if output_base: - # shutil.rmtree(output_base) + output_base = wrapper.config.getdir('OUTPUT_BASE') + if output_base: + shutil.rmtree(output_base) @pytest.mark.wrapper def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config config.set('config', 'INPUT_MUST_EXIST', False) wrapper = ASCII2NCWrapper(config) @@ -206,7 +198,3 @@ def test_get_config_file(metplus_config): config.set('config', 'ASCII2NC_CONFIG_FILE', fake_config_name) wrapper = ASCII2NCWrapper(config) assert wrapper.c_dict['CONFIG_FILE'] == fake_config_name - - #output_base = wrapper.config.getdir('OUTPUT_BASE') - #if output_base: - # shutil.rmtree(output_base) diff --git a/internal/tests/pytests/wrappers/command_builder/test_command_builder.py b/internal/tests/pytests/wrappers/command_builder/test_command_builder.py index fa95f7e037..c62609b983 100644 --- a/internal/tests/pytests/wrappers/command_builder/test_command_builder.py +++ b/internal/tests/pytests/wrappers/command_builder/test_command_builder.py @@ -24,7 +24,7 @@ def get_data_dir(config): ) 
@pytest.mark.wrapper def test_find_data_no_dated(metplus_config, data_type): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -55,7 +55,7 @@ def test_find_data_no_dated(metplus_config, data_type): ) @pytest.mark.wrapper def test_find_data_not_a_path(metplus_config, data_type): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) task_info = {} @@ -73,7 +73,7 @@ def test_find_data_not_a_path(metplus_config, data_type): @pytest.mark.wrapper def test_find_obs_no_dated(metplus_config): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -93,7 +93,7 @@ def test_find_obs_no_dated(metplus_config): @pytest.mark.wrapper def test_find_obs_dated(metplus_config): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -123,7 +123,7 @@ def test_find_obs_dated(metplus_config): ) @pytest.mark.wrapper def test_find_obs_offset(metplus_config, offsets, expected_file, offset_seconds): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -150,7 +150,7 @@ def test_find_obs_offset(metplus_config, offsets, expected_file, offset_seconds) @pytest.mark.wrapper def test_find_obs_dated_previous_day(metplus_config): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -170,7 +170,7 @@ def test_find_obs_dated_previous_day(metplus_config): @pytest.mark.wrapper def test_find_obs_dated_next_day(metplus_config): - config = metplus_config() + config = metplus_config pcw = CommandBuilder(config) v = {} @@ -209,7 +209,7 @@ def test_find_obs_dated_next_day(metplus_config): ) @pytest.mark.wrapper def test_override_by_instance(metplus_config, section_items): - config = metplus_config() + config = metplus_config # set config variables to default for key in section_items: @@ -240,7 +240,7 @@ def test_override_by_instance(metplus_config, section_items): ) @pytest.mark.wrapper def 
test_write_list_file(metplus_config, filename, file_list, output_dir): - config = metplus_config() + config = metplus_config cbw = CommandBuilder(config) # use output_dir relative to OUTPUT_BASE if it is specified @@ -290,7 +290,7 @@ def test_write_list_file(metplus_config, filename, file_list, output_dir): ) @pytest.mark.wrapper def test_handle_description(metplus_config, config_overrides, expected_value): - config = metplus_config() + config = metplus_config # set config values for key, value in config_overrides.items(): @@ -332,7 +332,7 @@ def test_handle_description(metplus_config, config_overrides, expected_value): @pytest.mark.wrapper def test_handle_regrid_old(metplus_config, config_overrides, set_to_grid, expected_dict): - config = metplus_config() + config = metplus_config # set config values for key, value in config_overrides.items(): @@ -381,7 +381,7 @@ def test_handle_regrid_old(metplus_config, config_overrides, set_to_grid, ) @pytest.mark.wrapper def test_handle_regrid_new(metplus_config, config_overrides, expected_output): - config = metplus_config() + config = metplus_config # set config values for key, value in config_overrides.items(): @@ -418,7 +418,7 @@ def test_handle_regrid_new(metplus_config, config_overrides, expected_output): @pytest.mark.wrapper def test_add_met_config_string(metplus_config, mp_config_name, met_config_name, c_dict_key, remove_quotes, expected_output): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_STRING_1', 'value_1') @@ -472,7 +472,7 @@ def test_add_met_config_string(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_bool(metplus_config, mp_config_name, met_config_name, c_dict_key, uppercase, expected_output, is_ok): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_BOOL_1', True) @@ 
-520,7 +520,7 @@ def test_add_met_config_bool(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_int(metplus_config, mp_config_name, met_config_name, c_dict_key, expected_output, is_ok): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_INT_1', 7) @@ -563,7 +563,7 @@ def test_add_met_config_int(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_float(metplus_config, mp_config_name, met_config_name, c_dict_key, expected_output, is_ok): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_FLOAT_1', 7.0) @@ -612,7 +612,7 @@ def test_add_met_config_float(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_thresh(metplus_config, mp_config_name, met_config_name, c_dict_key, expected_output, is_ok): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_THRESH_1', 'gt74') @@ -664,7 +664,7 @@ def test_add_met_config_thresh(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_list(metplus_config, mp_config_name, met_config_name, c_dict_key, remove_quotes, expected_output): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_LIST_1', 'value_1, value2') @@ -705,7 +705,7 @@ def test_add_met_config_list(metplus_config, mp_config_name, met_config_name, @pytest.mark.wrapper def test_add_met_config_list_allow_empty(metplus_config, mp_config_name, allow_empty, expected_output): - cbw = CommandBuilder(metplus_config()) + cbw = CommandBuilder(metplus_config) # set some config variables to test cbw.config.set('config', 'TEST_LIST_1', '') @@ -731,7 
+731,7 @@ def test_add_met_config_dict(metplus_config): end = 5 expected_value = f'{dict_name} = {{beg = -3;end = 5;}}' - config = metplus_config() + config = metplus_config config.set('config', 'TC_GEN_FCST_HR_WINDOW_BEG', beg) config.set('config', 'TC_GEN_FCST_HR_WINDOW_END', end) cbw = CommandBuilder(config) @@ -755,7 +755,7 @@ def test_add_met_config_window(metplus_config): end = 5 expected_value = f'{dict_name} = {{beg = -3;end = 5;}}' - config = metplus_config() + config = metplus_config config.set('config', 'TC_GEN_FCST_HR_WINDOW_BEG', beg) config.set('config', 'TC_GEN_FCST_HR_WINDOW_END', end) cbw = CommandBuilder(config) @@ -769,7 +769,7 @@ def test_add_met_config_window(metplus_config): @pytest.mark.wrapper def test_add_met_config(metplus_config): - config = metplus_config() + config = metplus_config value = 5 config.set('config', 'TC_GEN_VALID_FREQUENCY', value) cbw = CommandBuilder(config) @@ -795,7 +795,7 @@ def test_add_met_config_dict_nested(metplus_config): f'{{var1 = {sub_dict_value1};var2 = {sub_dict_value2};}}}}' ) - config = metplus_config() + config = metplus_config config.set('config', 'APP_OUTER_BEG', beg) config.set('config', 'APP_OUTER_END', end) config.set('config', 'APP_OUTER_INNER_VAR1', sub_dict_value1) @@ -832,7 +832,7 @@ def test_get_field_info_extra(metplus_config, extra, expected_value): d_type = 'FCST' name = 'name' level = '"(*,*)"' - config = metplus_config() + config = metplus_config wrapper = CommandBuilder(config) actual_value = wrapper.get_field_info( d_type=d_type, diff --git a/internal/tests/pytests/wrappers/compare_gridded/test_compare_gridded.py b/internal/tests/pytests/wrappers/compare_gridded/test_compare_gridded.py index 82b4fa2059..0eaf8d5ab9 100644 --- a/internal/tests/pytests/wrappers/compare_gridded/test_compare_gridded.py +++ b/internal/tests/pytests/wrappers/compare_gridded/test_compare_gridded.py @@ -13,7 +13,7 @@ def compare_gridded_wrapper(metplus_config): files. 
Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - config = metplus_config() + config = metplus_config return CompareGriddedWrapper(config) diff --git a/internal/tests/pytests/wrappers/ensemble_stat/test_ensemble_stat_wrapper.py b/internal/tests/pytests/wrappers/ensemble_stat/test_ensemble_stat_wrapper.py index 7a745cb92a..9328c3112a 100644 --- a/internal/tests/pytests/wrappers/ensemble_stat/test_ensemble_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/ensemble_stat/test_ensemble_stat_wrapper.py @@ -80,7 +80,7 @@ def set_minimum_config_settings(config, set_fields=True): def test_ensemble_stat_field_info(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config, set_fields=False) @@ -136,7 +136,7 @@ def test_handle_climo_file_variables(metplus_config, config_overrides, """ old_env_vars = ['CLIMO_MEAN_FILE', 'CLIMO_STDEV_FILE'] - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -564,7 +564,7 @@ def test_handle_climo_file_variables(metplus_config, config_overrides, def test_ensemble_stat_single_field(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -619,7 +619,7 @@ def test_ensemble_stat_single_field(metplus_config, config_overrides, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'EnsembleStatConfig_wrapped') @@ -641,7 +641,7 @@ def test_get_config_file(metplus_config): @pytest.mark.wrapper_c def test_ensemble_stat_fill_missing(metplus_config, config_overrides, expected_num_files): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) diff --git 
a/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py b/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py index 41d54886f0..2bcf588e99 100644 --- a/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py +++ b/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py @@ -8,15 +8,15 @@ from metplus.wrappers.extract_tiles_wrapper import ExtractTilesWrapper -def get_config(metplus_config): +def get_config(metplus_config_files): extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'extract_tiles_test.conf')) - return metplus_config(extra_configs) + return metplus_config_files(extra_configs) -def extract_tiles_wrapper(metplus_config): - config = get_config(metplus_config) +def extract_tiles_wrapper(metplus_config_files): + config = get_config(metplus_config_files) config.set('config', 'LOOP_ORDER', 'processes') wrapper = ExtractTilesWrapper(config) @@ -60,8 +60,8 @@ def get_input_lines(filepath): ] ) @pytest.mark.wrapper -def test_get_object_indices(metplus_config, object_cats, expected_indices): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_object_indices(metplus_config_files, object_cats, expected_indices): + wrapper = extract_tiles_wrapper(metplus_config_files) assert wrapper.get_object_indices(object_cats) == expected_indices @@ -79,8 +79,8 @@ def test_get_object_indices(metplus_config, object_cats, expected_indices): ] ) @pytest.mark.wrapper -def test_get_header_indices(metplus_config,header_name, index): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_header_indices(metplus_config_files,header_name, index): + wrapper = extract_tiles_wrapper(metplus_config_files) header = get_storm_lines(wrapper)[0] idx_dict = wrapper.get_header_indices(header) assert(idx_dict[header_name] == index) @@ -98,8 +98,8 @@ def test_get_header_indices(metplus_config,header_name, index): ] ) @pytest.mark.wrapper -def test_get_header_indices_mtd(metplus_config, header_name, 
index): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_header_indices_mtd(metplus_config_files, header_name, index): + wrapper = extract_tiles_wrapper(metplus_config_files) header = get_mtd_lines(wrapper)[0] idx_dict = wrapper.get_header_indices(header, 'MTD') assert(idx_dict[header_name] == index) @@ -119,8 +119,8 @@ def test_get_header_indices_mtd(metplus_config, header_name, index): ] ) @pytest.mark.wrapper -def test_get_data_from_track_line(metplus_config, header_name, value): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_data_from_track_line(metplus_config_files, header_name, value): + wrapper = extract_tiles_wrapper(metplus_config_files) storm_lines = get_storm_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header) @@ -140,8 +140,8 @@ def test_get_data_from_track_line(metplus_config, header_name, value): ] ) @pytest.mark.wrapper -def test_get_data_from_track_line_mtd(metplus_config, header_name, value): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_data_from_track_line_mtd(metplus_config_files, header_name, value): + wrapper = extract_tiles_wrapper(metplus_config_files) storm_lines = get_mtd_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header, 'MTD') @@ -150,9 +150,9 @@ def test_get_data_from_track_line_mtd(metplus_config, header_name, value): @pytest.mark.wrapper -def test_set_time_info_from_track_data(metplus_config): +def test_set_time_info_from_track_data(metplus_config_files): storm_id = 'ML1221072014' - wrapper = extract_tiles_wrapper(metplus_config) + wrapper = extract_tiles_wrapper(metplus_config_files) storm_lines = get_storm_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header) @@ -174,8 +174,8 @@ def test_set_time_info_from_track_data(metplus_config): ] ) @pytest.mark.wrapper -def test_get_grid_info(metplus_config, lat, lon, expected_result): - wrapper = extract_tiles_wrapper(metplus_config) +def 
test_get_grid_info(metplus_config_files, lat, lon, expected_result): + wrapper = extract_tiles_wrapper(metplus_config_files) assert(wrapper.get_grid_info(lat, lon, 'FCST') == expected_result) @@ -186,7 +186,7 @@ def test_get_grid_info(metplus_config, lat, lon, expected_result): ] ) @pytest.mark.wrapper -def test_get_grid(metplus_config, lat, lon, expected_result): - wrapper = extract_tiles_wrapper(metplus_config) +def test_get_grid(metplus_config_files, lat, lon, expected_result): + wrapper = extract_tiles_wrapper(metplus_config_files) storm_data = {'ALAT': lat, 'ALON': lon} assert(wrapper.get_grid('FCST', storm_data) == expected_result) diff --git a/internal/tests/pytests/wrappers/gen_ens_prod/test_gen_ens_prod_wrapper.py b/internal/tests/pytests/wrappers/gen_ens_prod/test_gen_ens_prod_wrapper.py index f8ed2b4639..49990fc528 100644 --- a/internal/tests/pytests/wrappers/gen_ens_prod/test_gen_ens_prod_wrapper.py +++ b/internal/tests/pytests/wrappers/gen_ens_prod/test_gen_ens_prod_wrapper.py @@ -362,7 +362,7 @@ def handle_input_dir(config): def test_gen_ens_prod_single_field(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -440,7 +440,7 @@ def test_gen_ens_prod_single_field(metplus_config, config_overrides, ) @pytest.mark.wrapper def test_get_config_file(metplus_config, use_default_config_file): - config = metplus_config() + config = metplus_config if use_default_config_file: config_file = os.path.join(config.getdir('PARM_BASE'), @@ -463,7 +463,7 @@ def test_get_config_file(metplus_config, use_default_config_file): @pytest.mark.wrapper def test_gen_ens_prod_fill_missing(metplus_config, config_overrides, expected_num_files): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) handle_input_dir(config) diff --git a/internal/tests/pytests/wrappers/gen_vx_mask/test_gen_vx_mask.py 
b/internal/tests/pytests/wrappers/gen_vx_mask/test_gen_vx_mask.py index b584f75499..fa1d660f90 100644 --- a/internal/tests/pytests/wrappers/gen_vx_mask/test_gen_vx_mask.py +++ b/internal/tests/pytests/wrappers/gen_vx_mask/test_gen_vx_mask.py @@ -16,7 +16,7 @@ def gen_vx_mask_wrapper(metplus_config): files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) return GenVxMaskWrapper(config) diff --git a/internal/tests/pytests/wrappers/grid_diag/test_grid_diag.py b/internal/tests/pytests/wrappers/grid_diag/test_grid_diag.py index 61fd599150..72e3a878d7 100644 --- a/internal/tests/pytests/wrappers/grid_diag/test_grid_diag.py +++ b/internal/tests/pytests/wrappers/grid_diag/test_grid_diag.py @@ -68,7 +68,7 @@ def test_get_all_files_and_subset(metplus_config, time_info, expected_subset): """! Test to ensure that get_all_files only gets the files that are relevant to the runtime settings and not every file in the directory """ - config = metplus_config() + config = metplus_config config.set('config', 'LOOP_BY', 'INIT') config.set('config', 'GRID_DIAG_RUNTIME_FREQ', 'RUN_ONCE') config.set('config', 'INIT_TIME_FMT', '%Y%m%d%H%M%S') @@ -169,7 +169,7 @@ def test_get_all_files_and_subset(metplus_config, time_info, expected_subset): ) @pytest.mark.wrapper def test_get_list_file_name(metplus_config, time_info, expected_filename): - wrapper = GridDiagWrapper(metplus_config()) + wrapper = GridDiagWrapper(metplus_config) assert(wrapper.get_list_file_name(time_info, 'input0') == expected_filename) @@ -177,7 +177,7 @@ def test_get_list_file_name(metplus_config, time_info, expected_filename): def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'GridDiagConfig_wrapped') diff --git 
a/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py b/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py index c10d914872..9a0caed0b7 100644 --- a/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py @@ -88,7 +88,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper_b def test_grid_stat_is_prob(metplus_config, config_overrides, expected_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -132,7 +132,7 @@ def test_handle_climo_file_variables(metplus_config, config_overrides, """ old_env_vars = ['CLIMO_MEAN_FILE', 'CLIMO_STDEV_FILE'] - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -695,7 +695,7 @@ def test_handle_climo_file_variables(metplus_config, config_overrides, def test_grid_stat_single_field(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -750,7 +750,7 @@ def test_grid_stat_single_field(metplus_config, config_overrides, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'GridStatConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/ioda2nc/test_ioda2nc_wrapper.py b/internal/tests/pytests/wrappers/ioda2nc/test_ioda2nc_wrapper.py index 1e06c77152..99a98db0fc 100644 --- a/internal/tests/pytests/wrappers/ioda2nc/test_ioda2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/ioda2nc/test_ioda2nc_wrapper.py @@ -186,7 +186,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper def test_ioda2nc_wrapper(metplus_config, config_overrides, env_var_values, extra_args): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -238,7 +238,7 @@ 
def test_ioda2nc_wrapper(metplus_config, config_overrides, @pytest.mark.wrapper def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config config.set('config', 'INPUT_MUST_EXIST', False) wrapper = IODA2NCWrapper(config) diff --git a/internal/tests/pytests/wrappers/mode/test_mode_wrapper.py b/internal/tests/pytests/wrappers/mode/test_mode_wrapper.py index 6f31714186..1ca480b49d 100644 --- a/internal/tests/pytests/wrappers/mode/test_mode_wrapper.py +++ b/internal/tests/pytests/wrappers/mode/test_mode_wrapper.py @@ -318,7 +318,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper_a def test_mode_single_field(metplus_config, config_overrides, expected_output): - config = metplus_config() + config = metplus_config # set config variables needed to run set_minimum_config_settings(config) @@ -401,7 +401,7 @@ def test_mode_single_field(metplus_config, config_overrides, @pytest.mark.wrapper_a def test_mode_multi_variate(metplus_config, config_overrides, expected_output): - config = metplus_config() + config = metplus_config # set config variables needed to run set_minimum_config_settings(config) @@ -518,7 +518,7 @@ def test_config_synonyms(metplus_config, config_name, env_var_name, elif var_type == 'float': in_value = out_value = 4.0 - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) config.set('config', config_name, in_value) wrapper = MODEWrapper(config) @@ -533,7 +533,7 @@ def test_config_synonyms(metplus_config, config_name, env_var_name, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'MODEConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/mtd/test_mtd_wrapper.py b/internal/tests/pytests/wrappers/mtd/test_mtd_wrapper.py index 135c62031f..420e6d40fc 100644 --- 
a/internal/tests/pytests/wrappers/mtd/test_mtd_wrapper.py +++ b/internal/tests/pytests/wrappers/mtd/test_mtd_wrapper.py @@ -19,7 +19,7 @@ def mtd_wrapper(metplus_config, lead_seq=None): files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) config.set('config', 'BOTH_VAR1_NAME', 'APCP') config.set('config', 'BOTH_VAR1_LEVELS', 'A06') @@ -195,7 +195,7 @@ def test_mtd_single(metplus_config): def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'MTDConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py b/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py index 9f98f0d80e..65af743f68 100644 --- a/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py @@ -11,7 +11,7 @@ from metplus.util import do_string_sub -def pb2nc_wrapper(metplus_config): +def pb2nc_wrapper(metplus_config_files): """! Returns a default PB2NCWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration @@ -21,7 +21,7 @@ def pb2nc_wrapper(metplus_config): # the pb2nc_test.conf file. 
extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'conf1')) - config = metplus_config(extra_configs) + config = metplus_config_files(extra_configs) return PB2NCWrapper(config) @@ -35,8 +35,8 @@ def pb2nc_wrapper(metplus_config): ] ) @pytest.mark.wrapper -def test_find_and_check_output_file_skip(metplus_config, exists, skip, run): - pb = pb2nc_wrapper(metplus_config) +def test_find_and_check_output_file_skip(metplus_config_files, exists, skip, run): + pb = pb2nc_wrapper(metplus_config_files) exist_file = 'wackyfilenametocreate' non_exist_file = 'wackyfilethatdoesntexist' @@ -75,8 +75,8 @@ def test_find_and_check_output_file_skip(metplus_config, exists, skip, run): ] ) @pytest.mark.wrapper -def test_get_command(metplus_config, infiles): - pb = pb2nc_wrapper(metplus_config) +def test_get_command(metplus_config_files, infiles): + pb = pb2nc_wrapper(metplus_config_files) pb.outfile = 'outfilename.txt' pb.outdir = pb.config.getdir('OUTPUT_BASE') outpath = os.path.join(pb.outdir, pb.outfile) @@ -109,8 +109,8 @@ def test_get_command(metplus_config, infiles): ] ) @pytest.mark.wrapper -def test_find_input_files(metplus_config, offsets, offset_to_find): - pb = pb2nc_wrapper(metplus_config) +def test_find_input_files(metplus_config_files, offsets, offset_to_find): + pb = pb2nc_wrapper(metplus_config_files) # for valid 20190201_12, offsets 3 and 5, create files to find # in the fake input directory based on input template input_dict = { 'valid' : datetime.datetime(2019, 2, 1, 12) } @@ -271,7 +271,7 @@ def test_find_input_files(metplus_config, offsets, offset_to_find): def test_pb2nc_all_fields(metplus_config, config_overrides, env_var_values): input_dir = '/some/input/dir' - config = metplus_config() + config = metplus_config # set config variables to prevent command from running and bypass check # if input files actually exist @@ -343,7 +343,7 @@ def test_pb2nc_all_fields(metplus_config, config_overrides, def test_get_config_file(metplus_config): 
fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'PB2NCConfig_wrapped') @@ -361,7 +361,7 @@ def test_pb2nc_file_window(metplus_config): begin_value = -3600 end_value = 3600 - config = metplus_config() + config = metplus_config config.set('config', 'PB2NC_FILE_WINDOW_BEGIN', begin_value) config.set('config', 'PB2NC_FILE_WINDOW_END', end_value) wrapper = PB2NCWrapper(config) diff --git a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py index 725278c393..2e2ea15aba 100644 --- a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py +++ b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py @@ -16,7 +16,7 @@ def get_test_data_dir(config, subdir=None): top_dir = os.path.join(top_dir, subdir) return top_dir -def pcp_combine_wrapper(metplus_config, d_type): +def pcp_combine_wrapper(metplus_config_files, d_type): """! Returns a default PCPCombineWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration @@ -26,7 +26,8 @@ def pcp_combine_wrapper(metplus_config, d_type): # the test1.conf file. 
extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'test1.conf')) - config = metplus_config(extra_configs) + config = metplus_config_files(extra_configs) + if d_type == "FCST": config.set('config', 'FCST_PCP_COMBINE_RUN', True) elif d_type == "OBS": @@ -36,9 +37,9 @@ def pcp_combine_wrapper(metplus_config, d_type): @pytest.mark.wrapper -def test_get_accumulation_1_to_6(metplus_config): +def test_get_accumulation_1_to_6(metplus_config_files): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) input_dir = get_test_data_dir(pcw.config, subdir='accum') task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') @@ -61,9 +62,9 @@ def test_get_accumulation_1_to_6(metplus_config): @pytest.mark.wrapper -def test_get_accumulation_6_to_6(metplus_config): +def test_get_accumulation_6_to_6(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) input_dir = get_test_data_dir(pcw.config, subdir='accum') task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') @@ -83,9 +84,9 @@ def test_get_accumulation_6_to_6(metplus_config): @pytest.mark.wrapper -def test_get_lowest_forecast_file_dated_subdir(metplus_config): +def test_get_lowest_forecast_file_dated_subdir(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) input_dir = get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802012100", '%Y%m%d%H%M') pcw.c_dict[f'{data_src}_INPUT_DIR'] = input_dir @@ -96,9 +97,9 @@ def test_get_lowest_forecast_file_dated_subdir(metplus_config): @pytest.mark.wrapper -def test_forecast_constant_init(metplus_config): +def test_forecast_constant_init(metplus_config_files): data_src = "FCST" - pcw = 
pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) pcw.c_dict['FCST_CONSTANT_INIT'] = True input_dir = get_test_data_dir(pcw.config, subdir='fcst') init_time = datetime.strptime("2018020112", '%Y%m%d%H') @@ -110,9 +111,9 @@ def test_forecast_constant_init(metplus_config): @pytest.mark.wrapper -def test_forecast_not_constant_init(metplus_config): +def test_forecast_not_constant_init(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) pcw.c_dict['FCST_CONSTANT_INIT'] = False input_dir = get_test_data_dir(pcw.config, subdir='fcst') init_time = datetime.strptime("2018020112", '%Y%m%d%H') @@ -125,9 +126,9 @@ def test_forecast_not_constant_init(metplus_config): @pytest.mark.wrapper -def test_get_lowest_forecast_file_no_subdir(metplus_config): +def test_get_lowest_forecast_file_no_subdir(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) input_dir = get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802012100", '%Y%m%d%H%M') template = "file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc" @@ -139,9 +140,9 @@ def test_get_lowest_forecast_file_no_subdir(metplus_config): @pytest.mark.wrapper -def test_get_lowest_forecast_file_yesterday(metplus_config): +def test_get_lowest_forecast_file_yesterday(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) input_dir = get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802010600", '%Y%m%d%H%M') template = "file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc" @@ -153,9 +154,9 @@ def test_get_lowest_forecast_file_yesterday(metplus_config): @pytest.mark.wrapper -def test_setup_add_method(metplus_config): +def 
test_setup_add_method(metplus_config_files): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') time_info = ti_calculate(task_info) @@ -178,9 +179,9 @@ def test_setup_add_method(metplus_config): # how to test? check output? @pytest.mark.wrapper -def test_setup_sum_method(metplus_config): +def test_setup_sum_method(metplus_config_files): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') task_info['lead'] = 0 @@ -190,9 +191,9 @@ def test_setup_sum_method(metplus_config): @pytest.mark.wrapper -def test_setup_subtract_method(metplus_config): +def test_setup_subtract_method(metplus_config_files): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config, data_src) + pcw = pcp_combine_wrapper(metplus_config_files, data_src) task_info = {} task_info['valid'] = datetime.strptime("201609050000", '%Y%m%d%H%M') task_info['lead_hours'] = 9 diff --git a/internal/tests/pytests/wrappers/plot_point_obs/test_plot_point_obs_wrapper.py b/internal/tests/pytests/wrappers/plot_point_obs/test_plot_point_obs_wrapper.py index 3e779d81a3..fd05410467 100644 --- a/internal/tests/pytests/wrappers/plot_point_obs/test_plot_point_obs_wrapper.py +++ b/internal/tests/pytests/wrappers/plot_point_obs/test_plot_point_obs_wrapper.py @@ -217,7 +217,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper_c def test_plot_point_obs(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -288,7 +288,7 @@ def test_plot_point_obs(metplus_config, config_overrides, env_var_values): def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config 
= metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'PlotPointObsConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/point2grid/test_point2grid.py b/internal/tests/pytests/wrappers/point2grid/test_point2grid.py index e4b27f5d60..c0f51d47fd 100644 --- a/internal/tests/pytests/wrappers/point2grid/test_point2grid.py +++ b/internal/tests/pytests/wrappers/point2grid/test_point2grid.py @@ -13,7 +13,7 @@ def p2g_wrapper(metplus_config): files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) return Point2GridWrapper(config) diff --git a/internal/tests/pytests/wrappers/point_stat/test_point_stat_wrapper.py b/internal/tests/pytests/wrappers/point_stat/test_point_stat_wrapper.py index 84ddf4e98c..2c574d2476 100755 --- a/internal/tests/pytests/wrappers/point_stat/test_point_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/point_stat/test_point_stat_wrapper.py @@ -41,7 +41,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper_a def test_met_dictionary_in_var_options(metplus_config): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) config.set('config', 'BOTH_VAR1_NAME', 'name') @@ -514,7 +514,7 @@ def test_point_stat_all_fields(metplus_config, config_overrides, fcst_fmts.append(fcst_fmt) obs_fmts.append(obs_fmt) - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) for index, (fcst, obs) in enumerate(zip(fcsts, obss)): @@ -583,7 +583,7 @@ def test_point_stat_all_fields(metplus_config, config_overrides, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'PointStatConfig_wrapped') diff --git 
a/internal/tests/pytests/wrappers/regrid_data_plane/test_regrid_data_plane.py b/internal/tests/pytests/wrappers/regrid_data_plane/test_regrid_data_plane.py index 9e6e436bf0..bce689fb23 100644 --- a/internal/tests/pytests/wrappers/regrid_data_plane/test_regrid_data_plane.py +++ b/internal/tests/pytests/wrappers/regrid_data_plane/test_regrid_data_plane.py @@ -15,7 +15,7 @@ def rdp_wrapper(metplus_config): files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) return RegridDataPlaneWrapper(config) @@ -61,7 +61,7 @@ def rdp_wrapper(metplus_config): def test_set_field_command_line_arguments(metplus_config, field_info, expected_arg): data_type = 'FCST' - config = metplus_config() + config = metplus_config rdp = RegridDataPlaneWrapper(config) @@ -128,7 +128,7 @@ def test_set_field_command_line_arguments(metplus_config, field_info, expected_a def test_get_output_names(metplus_config, var_list, expected_names): data_type = 'FCST' - rdp = RegridDataPlaneWrapper(metplus_config()) + rdp = RegridDataPlaneWrapper(metplus_config) assert rdp.get_output_names(var_list, data_type) == expected_names diff --git a/internal/tests/pytests/wrappers/runtime_freq/test_runtime_freq.py b/internal/tests/pytests/wrappers/runtime_freq/test_runtime_freq.py index 0c589349c3..0d0f218cff 100644 --- a/internal/tests/pytests/wrappers/runtime_freq/test_runtime_freq.py +++ b/internal/tests/pytests/wrappers/runtime_freq/test_runtime_freq.py @@ -102,7 +102,7 @@ ) @pytest.mark.wrapper def test_compare_time_info(metplus_config, runtime, filetime, expected_result): - config = metplus_config() + config = metplus_config wrapper = RuntimeFreqWrapper(config) actual_result = wrapper.compare_time_info(runtime, filetime) diff --git a/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py 
b/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py index d588d2425f..228e89114c 100644 --- a/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py +++ b/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py @@ -37,11 +37,27 @@ def get_input_dirs(config): def series_analysis_wrapper(metplus_config, config_overrides=None): - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), - 'series_test.conf')) - config = metplus_config(extra_configs) - config.set('config', 'LOOP_ORDER', 'processes') + config = metplus_config + config.set('config', 'SERIES_ANALYSIS_STAT_LIST', 'TOTAL, FBAR, OBAR, ME') + config.set('config', 'INIT_TIME_FMT', '%Y%m%d') + config.set('config', 'INIT_BEG', '20141214') + config.set('config', 'INIT_END', '20141214') + config.set('config', 'INIT_INCREMENT', '21600') + config.set('config', 'SERIES_ANALYSIS_BACKGROUND_MAP', 'no') + config.set('config', 'FCST_SERIES_ANALYSIS_INPUT_TEMPLATE', + ('{init?fmt=%Y%m%d_%H}/{storm_id}/FCST_TILE_F{lead?fmt=%3H}_' + 'gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%3H}.nc')) + config.set('config', 'OBS_SERIES_ANALYSIS_INPUT_TEMPLATE', + ('{init?fmt=%Y%m%d_%H}/{storm_id}/OBS_TILE_F{lead?fmt=%3H}_gfs' + '_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%3H}.nc')) + config.set('config', 'EXTRACT_TILES_OUTPUT_DIR', + '{OUTPUT_BASE}/extract_tiles') + config.set('config', 'FCST_SERIES_ANALYSIS_INPUT_DIR', + '{EXTRACT_TILES_OUTPUT_DIR}') + config.set('config', 'OBS_SERIES_ANALYSIS_INPUT_DIR', + '{EXTRACT_TILES_OUTPUT_DIR}') + config.set('config', 'SERIES_ANALYSIS_OUTPUT_DIR', + '{OUTPUT_BASE}/series_analysis_init') if config_overrides: for key, value in config_overrides.items(): config.set('config', key, value) @@ -297,7 +313,7 @@ def set_minimum_config_settings(config): def test_series_analysis_single_field(metplus_config, config_overrides, env_var_values): - config = metplus_config() + config = metplus_config 
set_minimum_config_settings(config) @@ -851,7 +867,7 @@ def test_get_netcdf_min_max(metplus_config): def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'SeriesAnalysisConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index fa13f7954a..be9d01c3ff 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -16,7 +16,7 @@ pp = pprint.PrettyPrinter() -def stat_analysis_wrapper(metplus_config): +def stat_analysis_wrapper(metplus_config_files): """! Returns a default StatAnalysisWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration @@ -25,7 +25,7 @@ def stat_analysis_wrapper(metplus_config): # Default, empty StatAnalysisWrapper with some configuration values set # to /path/to: extra_configs = [TEST_CONF] - config = metplus_config(extra_configs) + config = metplus_config_files(extra_configs) handle_tmp_dir(config) return StatAnalysisWrapper(config) @@ -145,7 +145,7 @@ def set_minimum_config_settings(config): @pytest.mark.wrapper_d def test_valid_init_env_vars(metplus_config, config_overrides, expected_env_vars): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) config.set('config', 'INIT_END', '20221015') for key, value in config_overrides.items(): @@ -202,7 +202,7 @@ def test_valid_init_env_vars(metplus_config, config_overrides, @pytest.mark.wrapper_d def test_check_required_job_template(metplus_config, config_overrides, expected_result): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) for key, value in 
config_overrides.items(): config.set('config', key, value) @@ -287,7 +287,7 @@ def test_check_required_job_template(metplus_config, config_overrides, ) @pytest.mark.wrapper_d def test_get_runtime_settings(metplus_config, c_dict, expected_result): - config = metplus_config() + config = metplus_config wrapper = StatAnalysisWrapper(config) runtime_settings = wrapper.get_runtime_settings(c_dict) @@ -310,7 +310,7 @@ def test_get_runtime_settings(metplus_config, c_dict, expected_result): @pytest.mark.wrapper_d def test_format_conf_list(metplus_config, list_name, config_overrides, expected_value): - config = metplus_config() + config = metplus_config for key, value in config_overrides.items(): config.set('config', key, value) @@ -862,7 +862,7 @@ def test_run_stat_analysis(metplus_config): ) @pytest.mark.wrapper_d def test_get_level_list(metplus_config, data_type, config_list, expected_list): - config = metplus_config() + config = metplus_config config.set('config', f'{data_type}_LEVEL_LIST', config_list) saw = StatAnalysisWrapper(config) @@ -873,7 +873,7 @@ def test_get_level_list(metplus_config, data_type, config_list, expected_list): @pytest.mark.wrapper_d def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config config.set('config', 'INPUT_MUST_EXIST', False) wrapper = StatAnalysisWrapper(config) diff --git a/internal/tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py b/internal/tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py index 248228fa92..b3cb191fa0 100644 --- a/internal/tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_gen/test_tc_gen_wrapper.py @@ -293,7 +293,7 @@ def test_tc_gen(metplus_config, config_overrides, env_var_values): expected_edeck_count = 6 expected_shape_count = 5 - config = metplus_config() + config = metplus_config test_data_dir = os.path.join(config.getdir('METPLUS_BASE'), 'internal', 'tests', @@ -423,7 +423,7 @@ def 
test_tc_gen(metplus_config, config_overrides, env_var_values): def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', 'TCGenConfig_wrapped') diff --git a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py index 8f9de6d6ff..db0360e796 100644 --- a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py @@ -68,7 +68,7 @@ def set_minimum_config_settings(config, loop_by='INIT'): ) def test_read_storm_info(metplus_config, config_overrides, isOK): """! Check if error is thrown if storm_id and basin or cyclone are set """ - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) # set config variable overrides @@ -94,7 +94,7 @@ def test_parse_storm_id(metplus_config, storm_id, basin, cyclone): Check that it returns wildcard expressions basin and cyclone cannot be parsed from storm ID """ - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -141,7 +141,7 @@ def test_get_bdeck(metplus_config, basin, cyclone, expected_files, combinations of basin/cyclone inputs """ time_info = {'date': datetime(2014, 12, 31, 18)} - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -194,7 +194,7 @@ def test_get_basin_cyclone_from_bdeck(metplus_config, template, filename, expected_basin = other_basin if other_basin else 'al' expected_cyclone = other_cyclone if other_cyclone else '1009' time_info = {'date': datetime(2014, 12, 31, 18)} - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) wrapper = TCPairsWrapper(config) @@ -245,7 +245,7 @@ def test_get_basin_cyclone_from_bdeck(metplus_config, template, filename, @pytest.mark.wrapper 
def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, storm_type, values_to_check): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -389,7 +389,7 @@ def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, # run using init and valid time variables for loop_by in ['INIT', 'VALID']: remove_beg = remove_end = remove_match_points = False - config = metplus_config() + config = metplus_config set_minimum_config_settings(config, loop_by) @@ -485,7 +485,7 @@ def test_tc_pairs_read_all_files(metplus_config, config_overrides, env_var_values): # run using init and valid time variables for loop_by in ['INIT', 'VALID']: - config = metplus_config() + config = metplus_config set_minimum_config_settings(config, loop_by) @@ -555,7 +555,7 @@ def test_tc_pairs_read_all_files(metplus_config, config_overrides, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config config.set('config', 'INIT_TIME_FMT', time_fmt) config.set('config', 'INIT_BEG', run_times[0]) default_config_file = os.path.join(config.getdir('PARM_BASE'), diff --git a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py index fe66cff3f8..d40c177b7b 100644 --- a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py @@ -10,14 +10,14 @@ from metplus.util import ti_calculate -def get_config(metplus_config): +def get_config(metplus_config_files): extra_configs = [] extra_configs.append(os.path.join(os.path.dirname(__file__), 'tc_stat_conf.conf')) - return metplus_config(extra_configs) + return metplus_config_files(extra_configs) -def tc_stat_wrapper(metplus_config): +def tc_stat_wrapper(metplus_config_files): """! 
Returns a default TCStatWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration @@ -25,7 +25,7 @@ def tc_stat_wrapper(metplus_config): # Default, empty TcStatWrapper with some configuration values set # to /path/to: - config = get_config(metplus_config) + config = get_config(metplus_config_files) return TCStatWrapper(config) @@ -148,13 +148,13 @@ def test_override_config_in_c_dict(metplus_config, overrides, c_dict): ] ) @pytest.mark.wrapper -def test_handle_jobs(metplus_config, jobs, init_dt, expected_output): +def test_handle_jobs(metplus_config_files, jobs, init_dt, expected_output): if init_dt: time_info = ti_calculate({'init': init_dt}) else: time_info = None - wrapper = tc_stat_wrapper(metplus_config) + wrapper = tc_stat_wrapper(metplus_config_files) output_base = wrapper.config.getdir('OUTPUT_BASE') output_dir = os.path.join(output_base, 'test_handle_jobs') @@ -223,7 +223,7 @@ def cleanup_test_dirs(parent_dirs, output_dir): ] ) @pytest.mark.wrapper -def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, +def test_handle_jobs_create_parent_dir(metplus_config_files, jobs, init_dt, expected_output, parent_dirs): # if init time is provided, calculate other time dict items if init_dt: @@ -231,7 +231,7 @@ def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, else: time_info = None - wrapper = tc_stat_wrapper(metplus_config) + wrapper = tc_stat_wrapper(metplus_config_files) # create directory path relative to OUTPUT_BASE to test that function # creates parent directories properly @@ -265,7 +265,7 @@ def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' - config = metplus_config() + config = metplus_config default_config_file = os.path.join(config.getdir('PARM_BASE'), 'met_config', diff --git 
a/internal/tests/pytests/wrappers/user_script/test_user_script.py b/internal/tests/pytests/wrappers/user_script/test_user_script.py index b45fe88108..060f8f2a32 100644 --- a/internal/tests/pytests/wrappers/user_script/test_user_script.py +++ b/internal/tests/pytests/wrappers/user_script/test_user_script.py @@ -347,7 +347,7 @@ def set_run_type_info(config, run_type): @pytest.mark.wrapper def test_run_user_script_all_times(metplus_config, input_configs, run_types, expected_cmds): - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) for key, value in input_configs.items(): From 33ae44fecf463f89500a200849341e6355f69e3c Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 14:51:28 -0600 Subject: [PATCH 87/92] fix tests that create more than 1 METplusConfig object in the test --- .../config_metplus/test_config_metplus.py | 32 +++-- .../util/time_looping/test_time_looping.py | 118 +++++++++++------- .../pcp_combine/test_pcp_combine_wrapper.py | 20 +-- .../tc_pairs/test_tc_pairs_wrapper.py | 9 +- 4 files changed, 99 insertions(+), 80 deletions(-) diff --git a/internal/tests/pytests/util/config_metplus/test_config_metplus.py b/internal/tests/pytests/util/config_metplus/test_config_metplus.py index befaa25316..f7161d6c8b 100644 --- a/internal/tests/pytests/util/config_metplus/test_config_metplus.py +++ b/internal/tests/pytests/util/config_metplus/test_config_metplus.py @@ -817,8 +817,13 @@ def test_parse_var_list_series_by(metplus_config): assert actual_sa.get(key) == value +@pytest.mark.parametrize( + 'start_index', [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + ] +) @pytest.mark.util -def test_parse_var_list_priority_fcst(metplus_config): +def test_parse_var_list_priority_fcst(metplus_config, start_index): priority_list = ['FCST_GRID_STAT_VAR1_NAME', 'FCST_GRID_STAT_VAR1_INPUT_FIELD_NAME', 'FCST_GRID_STAT_VAR1_FIELD_NAME', @@ -833,22 +838,15 @@ def 
test_parse_var_list_priority_fcst(metplus_config): 'BOTH_VAR1_FIELD_NAME', ] time_info = {} - - # loop through priority list, process, then pop first value off and - # process again until all items have been popped. - # This will check that list is in priority order - while(priority_list): - config = metplus_config - for key in priority_list: - config.set('config', key, key.lower()) - - var_list = config_metplus.parse_var_list(config, time_info=time_info, - data_type='FCST', - met_tool='grid_stat') - - assert len(var_list) == 1 - assert var_list[0].get('fcst_name') == priority_list[0].lower() - priority_list.pop(0) + config = metplus_config + for key in priority_list[start_index:]: + config.set('config', key, key.lower()) + + var_list = config_metplus.parse_var_list(config, time_info=time_info, + data_type='FCST', + met_tool='grid_stat') + assert len(var_list) == 1 + assert var_list[0].get('fcst_name') == priority_list[start_index].lower() # test that if wrapper specific field info is specified, it only gets diff --git a/internal/tests/pytests/util/time_looping/test_time_looping.py b/internal/tests/pytests/util/time_looping/test_time_looping.py index 2503ebb71c..240a1cccd8 100644 --- a/internal/tests/pytests/util/time_looping/test_time_looping.py +++ b/internal/tests/pytests/util/time_looping/test_time_looping.py @@ -107,8 +107,13 @@ def test_time_generator_increment(metplus_config): assert True +@pytest.mark.parametrize( + 'prefix', [ + 'INIT', 'VALID', + ] +) @pytest.mark.util -def test_time_generator_error_check(metplus_config): +def test_time_generator_error_check_list(metplus_config, prefix): """! Test that None is returned by the time generator when the time looping config variables are not set properly. 
Tests: Missing LOOP_BY, @@ -120,65 +125,82 @@ def test_time_generator_error_check(metplus_config): _BEG is after _END, """ time_fmt = '%Y%m%d%H' - for prefix in ['INIT', 'VALID']: - config = metplus_config + config = metplus_config - # unset LOOP_BY - assert next(time_generator(config)) is None - config.set('config', 'LOOP_BY', prefix) + # unset LOOP_BY + assert next(time_generator(config)) is None + config.set('config', 'LOOP_BY', prefix) - # unset _TIME_FMT - assert next(time_generator(config)) is None - config.set('config', f'{prefix}_TIME_FMT', time_fmt) + # unset _TIME_FMT + assert next(time_generator(config)) is None + config.set('config', f'{prefix}_TIME_FMT', time_fmt) - # test [INIT/VALID]_LIST configurations + # test [INIT/VALID]_LIST configurations - # empty _LIST - config.set('config', f'{prefix}_LIST', '') - assert next(time_generator(config)) is None + # empty _LIST + config.set('config', f'{prefix}_LIST', '') + assert next(time_generator(config)) is None - # list value doesn't match format - config.set('config', f'{prefix}_LIST', '202102010412') - assert next(time_generator(config)) is None + # list value doesn't match format + config.set('config', f'{prefix}_LIST', '202102010412') + assert next(time_generator(config)) is None - # 2nd list value doesn't match format - config.set('config', f'{prefix}_LIST', '2021020104, 202102010412') - expected_time = datetime.strptime('2021020104', time_fmt) - generator = time_generator(config) - assert next(generator)[prefix.lower()] == expected_time - assert next(generator) is None + # 2nd list value doesn't match format + config.set('config', f'{prefix}_LIST', '2021020104, 202102010412') + expected_time = datetime.strptime('2021020104', time_fmt) + generator = time_generator(config) + assert next(generator)[prefix.lower()] == expected_time + assert next(generator) is None - # good _LIST - config.set('config', f'{prefix}_LIST', '2021020104') - assert next(time_generator(config))[prefix.lower()] == expected_time + 
# good _LIST + config.set('config', f'{prefix}_LIST', '2021020104') + assert next(time_generator(config))[prefix.lower()] == expected_time - # get a fresh config object to test BEG/END configurations - config = metplus_config - config.set('config', 'LOOP_BY', prefix) - config.set('config', f'{prefix}_TIME_FMT', time_fmt) - # _BEG doesn't match time format (too long) - config.set('config', f'{prefix}_BEG', '202110311259') - config.set('config', f'{prefix}_END', '2021112012') +@pytest.mark.parametrize( + 'prefix', [ + 'INIT', 'VALID', + ] +) +@pytest.mark.util +def test_time_generator_error_check_beg_end(metplus_config, prefix): + """! Test that None is returned by the time generator when + the time looping config variables are not set properly. Tests: + Missing LOOP_BY, + Missing [INIT/VALID]_TIME_FMT, + Empty [INIT/VALID]_LIST (if set), + List value doesn't match time format, + _BEG or _END value doesn't match format, + _INCREMENT is less than 60 seconds, + _BEG is after _END, + """ + time_fmt = '%Y%m%d%H' + config = metplus_config + config.set('config', 'LOOP_BY', prefix) + config.set('config', f'{prefix}_TIME_FMT', time_fmt) + + # _BEG doesn't match time format (too long) + config.set('config', f'{prefix}_BEG', '202110311259') + config.set('config', f'{prefix}_END', '2021112012') - assert next(time_generator(config)) is None - config.set('config', f'{prefix}_BEG', '2021103112') + assert next(time_generator(config)) is None + config.set('config', f'{prefix}_BEG', '2021103112') - # unset _END uses _BEG value, so it should succeed - assert next(time_generator(config)) is not None + # unset _END uses _BEG value, so it should succeed + assert next(time_generator(config)) is not None - # _END doesn't match time format (too long) - config.set('config', f'{prefix}_END', '202111201259') + # _END doesn't match time format (too long) + config.set('config', f'{prefix}_END', '202111201259') - assert next(time_generator(config)) is None - config.set('config', f'{prefix}_END', 
'2021112012') - assert next(time_generator(config)) is not None + assert next(time_generator(config)) is None + config.set('config', f'{prefix}_END', '2021112012') + assert next(time_generator(config)) is not None - # _INCREMENT is less than 60 seconds - config.set('config', f'{prefix}_INCREMENT', '10S') - assert next(time_generator(config)) is None - config.set('config', f'{prefix}_INCREMENT', '1d') + # _INCREMENT is less than 60 seconds + config.set('config', f'{prefix}_INCREMENT', '10S') + assert next(time_generator(config)) is None + config.set('config', f'{prefix}_INCREMENT', '1d') - # _END time comes before _BEG time - config.set('config', f'{prefix}_END', '2020112012') - assert next(time_generator(config)) is None + # _END time comes before _BEG time + config.set('config', f'{prefix}_END', '2020112012') + assert next(time_generator(config)) is None diff --git a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py index 2e2ea15aba..6c2ba65523 100644 --- a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py +++ b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py @@ -211,7 +211,7 @@ def test_pcp_combine_add_subhourly(metplus_config): fcst_level = 'Surface' fcst_output_name = 'A001500' fcst_fmt = f'\'name="{fcst_name}"; level="{fcst_level}";\'' - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -276,7 +276,7 @@ def test_pcp_combine_add_subhourly(metplus_config): @pytest.mark.wrapper def test_pcp_combine_bucket(metplus_config): fcst_output_name = 'APCP' - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -356,7 +356,7 @@ def test_pcp_combine_derive(metplus_config, config_overrides, extra_fields): fcst_name = 'APCP' fcst_level = 'A03' fcst_fmt = 
f'-field \'name="{fcst_name}"; level="{fcst_level}";\'' - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -429,7 +429,7 @@ def test_pcp_combine_derive(metplus_config, config_overrides, extra_fields): def test_pcp_combine_loop_custom(metplus_config): fcst_name = 'APCP' ens_list = ['ens1', 'ens2', 'ens3', 'ens4', 'ens5', 'ens6'] - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -491,7 +491,7 @@ def test_pcp_combine_loop_custom(metplus_config): @pytest.mark.wrapper def test_pcp_combine_subtract(metplus_config): - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -554,7 +554,7 @@ def test_pcp_combine_sum_subhourly(metplus_config): fcst_level = 'Surface' fcst_output_name = 'A001500' fcst_fmt = f'-field \'name="{fcst_name}"; level="{fcst_level}";\'' - config = metplus_config() + config = metplus_config test_data_dir = get_test_data_dir(config) fcst_input_dir = os.path.join(test_data_dir, @@ -632,7 +632,7 @@ def test_pcp_combine_sum_subhourly(metplus_config): def test_handle_name_argument(metplus_config, output_name, extra_output, expected_results): data_src = 'FCST' - config = metplus_config() + config = metplus_config wrapper = PCPCombineWrapper(config) wrapper.c_dict[data_src + '_EXTRA_OUTPUT_NAMES'] = extra_output wrapper._handle_name_argument(output_name, data_src) @@ -671,7 +671,7 @@ def test_handle_name_argument(metplus_config, output_name, extra_output, @pytest.mark.wrapper def test_get_extra_fields(metplus_config, names, levels, expected_args): data_src = 'FCST' - config = metplus_config() + config = metplus_config config.set('config', 'FCST_PCP_COMBINE_RUN', True) config.set('config', 'FCST_PCP_COMBINE_METHOD', 'ADD') config.set('config', 'FCST_PCP_COMBINE_EXTRA_NAMES', names) @@ -688,7 
+688,7 @@ def test_get_extra_fields(metplus_config, names, levels, expected_args): @pytest.mark.wrapper def test_add_method_single_file(metplus_config): data_src = 'FCST' - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) config.set('config', 'INPUT_MUST_EXIST', False) @@ -758,7 +758,7 @@ def test_subtract_method_zero_accum(metplus_config): input_level = '"(*,*)"' in_dir = '/some/input/dir' out_dir = '/some/output/dir' - config = metplus_config() + config = metplus_config config.set('config', 'DO_NOT_RUN_EXE', True) config.set('config', 'INPUT_MUST_EXIST', False) diff --git a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py index db0360e796..6ff2364a53 100644 --- a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py @@ -384,12 +384,12 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, ] ) @pytest.mark.wrapper -def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, +def test_tc_pairs_loop_order_processes(metplus_config_files, config_overrides, env_var_values): # run using init and valid time variables for loop_by in ['INIT', 'VALID']: remove_beg = remove_end = remove_match_points = False - config = metplus_config + config = metplus_config_files([]) set_minimum_config_settings(config, loop_by) @@ -439,7 +439,6 @@ def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, f"-out {out_dir}/mlq2014121318.gfso.0104"), ] - all_cmds = wrapper.run_all_times() print(f"ALL COMMANDS: {all_cmds}") assert len(all_cmds) == len(expected_cmds) @@ -481,11 +480,11 @@ def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, ] ) @pytest.mark.wrapper -def test_tc_pairs_read_all_files(metplus_config, config_overrides, +def test_tc_pairs_read_all_files(metplus_config_files, config_overrides, env_var_values): 
# run using init and valid time variables for loop_by in ['INIT', 'VALID']: - config = metplus_config + config = metplus_config_files([]) set_minimum_config_settings(config, loop_by) From 1646010571467cef44487ccb2af58de204becb71 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 15:25:03 -0600 Subject: [PATCH 88/92] fixed another test that broke --- .../tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py index d40c177b7b..a04736b361 100644 --- a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py @@ -110,8 +110,8 @@ def tc_stat_wrapper(metplus_config_files): ] ) @pytest.mark.wrapper -def test_override_config_in_c_dict(metplus_config, overrides, c_dict): - config = get_config(metplus_config) +def test_override_config_in_c_dict(metplus_config_files, overrides, c_dict): + config = get_config(metplus_config_files) instance = 'tc_stat_overrides' if not config.has_section(instance): config.add_section(instance) From d89f2182c9bf990631802e6567011b876d86050a Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 16:35:16 -0600 Subject: [PATCH 89/92] updated tests that use a config file to set the values in the test file instead --- .../tcmpr_plotter/test_tcmpr_plotter.py | 6 +- .../ascii2nc/test_ascii2nc_wrapper.py | 38 +++++-- .../extract_tiles/extract_tiles_test.conf | 74 ------------ .../extract_tiles/test_extract_tiles.py | 67 ++++++----- internal/tests/pytests/wrappers/pb2nc/conf1 | 72 ------------ .../wrappers/pb2nc/test_pb2nc_wrapper.py | 53 +++++---- .../pytests/wrappers/pcp_combine/test1.conf | 35 ------ .../pcp_combine/test_pcp_combine_wrapper.py | 77 
++++++++----- .../tc_pairs/tc_pairs_wrapper_test.conf | 105 ------------------ .../tc_pairs/test_tc_pairs_wrapper.py | 6 +- 10 files changed, 151 insertions(+), 382 deletions(-) delete mode 100644 internal/tests/pytests/wrappers/extract_tiles/extract_tiles_test.conf delete mode 100644 internal/tests/pytests/wrappers/pb2nc/conf1 delete mode 100644 internal/tests/pytests/wrappers/pcp_combine/test1.conf delete mode 100644 internal/tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf diff --git a/internal/tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py b/internal/tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py index 23d3a27153..0b7dca4e6c 100644 --- a/internal/tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py +++ b/internal/tests/pytests/plotting/tcmpr_plotter/test_tcmpr_plotter.py @@ -99,7 +99,7 @@ def set_minimum_config_settings(config): ) @pytest.mark.plotting def test_read_loop_info(metplus_config, config_overrides, expected_loop_args): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -181,7 +181,7 @@ def test_read_loop_info(metplus_config, config_overrides, expected_loop_args): @pytest.mark.plotting def test_tcmpr_plotter_loop(metplus_config, config_overrides, expected_strings): - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) @@ -278,7 +278,7 @@ def test_tcmpr_plotter(metplus_config, config_overrides, expected_string): expected_string = f' {expected_string}' for single_file in [True, False]: - config = metplus_config() + config = metplus_config set_minimum_config_settings(config) diff --git a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py index 4065c0dbe2..0af69609ad 100644 --- a/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/ascii2nc/test_ascii2nc_wrapper.py @@ -8,10 +8,32 @@ from 
metplus.wrappers.ascii2nc_wrapper import ASCII2NCWrapper -def ascii2nc_wrapper(metplus_config_files, config_path=None, config_overrides=None): - config = metplus_config_files([config_path]) - overrides = {'DO_NOT_RUN_EXE': True, - 'INPUT_MUST_EXIST': False} +def ascii2nc_wrapper(metplus_config, config_overrides=None): + config = metplus_config + overrides = { + 'DO_NOT_RUN_EXE': True, + 'INPUT_MUST_EXIST': False, + 'PROCESS_LIST': 'ASCII2NC', + 'LOOP_BY': 'VALID', + 'VALID_TIME_FMT': '%Y%m%d%H', + 'VALID_BEG': '2010010112', + 'VALID_END': '2010010112', + 'VALID_INCREMENT': '1M', + 'ASCII2NC_INPUT_TEMPLATE': '{INPUT_BASE}/met_test/data/sample_obs/ascii/precip24_{valid?fmt=%Y%m%d%H}.ascii', + 'ASCII2NC_OUTPUT_TEMPLATE': '{OUTPUT_BASE}/ascii2nc/precip24_{valid?fmt=%Y%m%d%H}.nc', + 'ASCII2NC_CONFIG_FILE': '{PARM_BASE}/met_config/Ascii2NcConfig_wrapped', + 'ASCII2NC_TIME_SUMMARY_FLAG': 'False', + 'ASCII2NC_TIME_SUMMARY_RAW_DATA': 'False', + 'ASCII2NC_TIME_SUMMARY_BEG': '000000', + 'ASCII2NC_TIME_SUMMARY_END': '235959', + 'ASCII2NC_TIME_SUMMARY_STEP': '300', + 'ASCII2NC_TIME_SUMMARY_WIDTH': '600', + 'ASCII2NC_TIME_SUMMARY_GRIB_CODES': '11, 204, 211', + 'ASCII2NC_TIME_SUMMARY_VAR_NAMES': '', + 'ASCII2NC_TIME_SUMMARY_TYPES': 'min, max, range, mean, stdev, median, p80', + 'ASCII2NC_TIME_SUMMARY_VALID_FREQ': '0', + 'ASCII2NC_TIME_SUMMARY_VALID_THRESH': '0.0', + } if config_overrides: for key, value in config_overrides.items(): overrides[key] = value @@ -134,13 +156,9 @@ def ascii2nc_wrapper(metplus_config_files, config_path=None, config_overrides=No ] ) @pytest.mark.wrapper -def test_ascii2nc_wrapper(metplus_config_files, config_overrides, +def test_ascii2nc_wrapper(metplus_config, config_overrides, env_var_values): - wrapper = ( - ascii2nc_wrapper(metplus_config_files, - 'use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf', - config_overrides) - ) + wrapper = ascii2nc_wrapper(metplus_config, config_overrides) assert wrapper.isOK input_path = wrapper.config.getraw('config', 
'ASCII2NC_INPUT_TEMPLATE') diff --git a/internal/tests/pytests/wrappers/extract_tiles/extract_tiles_test.conf b/internal/tests/pytests/wrappers/extract_tiles/extract_tiles_test.conf deleted file mode 100644 index 4f34897b33..0000000000 --- a/internal/tests/pytests/wrappers/extract_tiles/extract_tiles_test.conf +++ /dev/null @@ -1,74 +0,0 @@ -# -# CONFIGURATION -# -[config] - -# Loop over each process in the process list (set in PROCESS_LIST) for all times in the time window of -# interest. -LOOP_ORDER = processes - -PROCESS_LIST = ExtractTiles - -# The init time begin and end times, increment -LOOP_BY = INIT -INIT_TIME_FMT = %Y%m%d -INIT_BEG = 20141214 -INIT_END = 20141214 - -# This is the step-size. Increment in seconds from the begin time to the end -# time -INIT_INCREMENT = 21600 ;; set to every 6 hours=21600 seconds - -# A list of times to include, in format YYYYMMDD_hh -INIT_INCLUDE = - -# A list of times to exclude, in format YYYYMMDD_hh -INIT_EXCLUDE = - -# Constants used in creating the tile grid, used by extract tiles -EXTRACT_TILES_NLAT = 60 -EXTRACT_TILES_NLON = 60 - -# Resolution of data in degrees, used by extract tiles -EXTRACT_TILES_DLAT = 0.5 -EXTRACT_TILES_DLON = 0.5 - -# Degrees to subtract from the center lat and lon to -# calculate the lower left lat (lat_ll) and lower -# left lon (lon_ll) for a grid that is 2n X 2m, -# where n = EXTRACT_TILES_LAT_ADJ degrees and m = EXTRACT_TILES_LON_ADJ degrees. -# For this case, where n=15 and m=15, this results -# in a 30 deg X 30 deg grid. Used by extract tiles -EXTRACT_TILES_LON_ADJ = 15 -EXTRACT_TILES_LAT_ADJ = 15 - -#EXTRACT_TILES_FILTER_OPTS = -EXTRACT_TILES_FILTER_OPTS = -basin ML -SERIES_ANALYSIS_FILTER_OPTS = -init_beg {INIT_BEG} -init_end {INIT_END} - -# OVERWRITE OPTIONS -# Don't overwrite filter files if they already exist. 
-# Set to no if you do NOT want to override existing files -# Set to yes if you do want to override existing files -EXTRACT_TILES_OVERWRITE_TRACK = no - -# if = initializes to an empty string '' or list [], indicating all vars are to be considered -EXTRACT_TILES_VAR_LIST = - -# -# FILENAME TEMPLATES -# -[filename_templates] -# Define the format of the filenames -FCST_EXTRACT_TILES_INPUT_TEMPLATE = gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%HHH}.grb2 -OBS_EXTRACT_TILES_INPUT_TEMPLATE = gfs_4_{valid?fmt=%Y%m%d}_{valid?fmt=%H}00_000.grb2 - -[dir] -# Location of your model data of interest -EXTRACT_TILES_GRID_INPUT_DIR = {INPUT_BASE}/cyclone_track_feature/reduced_model_data - -EXTRACT_TILES_PAIRS_INPUT_DIR = {OUTPUT_BASE}/tc_pairs - -# Use this setting to separate the filtered track files from -# the series analysis directory. -EXTRACT_TILES_OUTPUT_DIR = {OUTPUT_BASE}/extract_tiles diff --git a/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py b/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py index 2bcf588e99..aa71f0eb56 100644 --- a/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py +++ b/internal/tests/pytests/wrappers/extract_tiles/test_extract_tiles.py @@ -8,17 +8,32 @@ from metplus.wrappers.extract_tiles_wrapper import ExtractTilesWrapper -def get_config(metplus_config_files): - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), - 'extract_tiles_test.conf')) - return metplus_config_files(extra_configs) +def extract_tiles_wrapper(metplus_config): + config = metplus_config + config.set('config', 'PROCESS_LIST', 'ExtractTiles') + config.set('config', 'LOOP_BY', 'INIT') + config.set('config', 'INIT_TIME_FMT', '%Y%m%d') + config.set('config', 'INIT_BEG', '20141214') + config.set('config', 'INIT_END', '20141214') + config.set('config', 'INIT_INCREMENT', '21600') + config.set('config', 'EXTRACT_TILES_NLAT', '60') + config.set('config', 'EXTRACT_TILES_NLON', '60') + 
config.set('config', 'EXTRACT_TILES_DLAT', '0.5') + config.set('config', 'EXTRACT_TILES_DLON', '0.5') + config.set('config', 'EXTRACT_TILES_LAT_ADJ', '15') + config.set('config', 'EXTRACT_TILES_LON_ADJ', '15') + config.set('config', 'EXTRACT_TILES_FILTER_OPTS', '-basin ML') + config.set('config', 'FCST_EXTRACT_TILES_INPUT_TEMPLATE', + 'gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%HHH}.grb2') + config.set('config', 'OBS_EXTRACT_TILES_INPUT_TEMPLATE', + 'gfs_4_{valid?fmt=%Y%m%d}_{valid?fmt=%H}00_000.grb2') + config.set('config', 'EXTRACT_TILES_GRID_INPUT_DIR', + '{INPUT_BASE}/cyclone_track_feature/reduced_model_data') + config.set('config', 'EXTRACT_TILES_PAIRS_INPUT_DIR', + '{OUTPUT_BASE}/tc_pairs') + config.set('config', 'EXTRACT_TILES_OUTPUT_DIR', + '{OUTPUT_BASE}/extract_tiles') - -def extract_tiles_wrapper(metplus_config_files): - config = get_config(metplus_config_files) - - config.set('config', 'LOOP_ORDER', 'processes') wrapper = ExtractTilesWrapper(config) return wrapper @@ -60,8 +75,8 @@ def get_input_lines(filepath): ] ) @pytest.mark.wrapper -def test_get_object_indices(metplus_config_files, object_cats, expected_indices): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_object_indices(metplus_config, object_cats, expected_indices): + wrapper = extract_tiles_wrapper(metplus_config) assert wrapper.get_object_indices(object_cats) == expected_indices @@ -79,8 +94,8 @@ def test_get_object_indices(metplus_config_files, object_cats, expected_indices) ] ) @pytest.mark.wrapper -def test_get_header_indices(metplus_config_files,header_name, index): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_header_indices(metplus_config,header_name, index): + wrapper = extract_tiles_wrapper(metplus_config) header = get_storm_lines(wrapper)[0] idx_dict = wrapper.get_header_indices(header) assert(idx_dict[header_name] == index) @@ -98,8 +113,8 @@ def test_get_header_indices(metplus_config_files,header_name, index): ] ) 
@pytest.mark.wrapper -def test_get_header_indices_mtd(metplus_config_files, header_name, index): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_header_indices_mtd(metplus_config, header_name, index): + wrapper = extract_tiles_wrapper(metplus_config) header = get_mtd_lines(wrapper)[0] idx_dict = wrapper.get_header_indices(header, 'MTD') assert(idx_dict[header_name] == index) @@ -119,8 +134,8 @@ def test_get_header_indices_mtd(metplus_config_files, header_name, index): ] ) @pytest.mark.wrapper -def test_get_data_from_track_line(metplus_config_files, header_name, value): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_data_from_track_line(metplus_config, header_name, value): + wrapper = extract_tiles_wrapper(metplus_config) storm_lines = get_storm_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header) @@ -140,8 +155,8 @@ def test_get_data_from_track_line(metplus_config_files, header_name, value): ] ) @pytest.mark.wrapper -def test_get_data_from_track_line_mtd(metplus_config_files, header_name, value): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_data_from_track_line_mtd(metplus_config, header_name, value): + wrapper = extract_tiles_wrapper(metplus_config) storm_lines = get_mtd_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header, 'MTD') @@ -150,9 +165,9 @@ def test_get_data_from_track_line_mtd(metplus_config_files, header_name, value): @pytest.mark.wrapper -def test_set_time_info_from_track_data(metplus_config_files): +def test_set_time_info_from_track_data(metplus_config): storm_id = 'ML1221072014' - wrapper = extract_tiles_wrapper(metplus_config_files) + wrapper = extract_tiles_wrapper(metplus_config) storm_lines = get_storm_lines(wrapper) header = storm_lines[0] idx_dict = wrapper.get_header_indices(header) @@ -174,8 +189,8 @@ def test_set_time_info_from_track_data(metplus_config_files): ] ) @pytest.mark.wrapper -def 
test_get_grid_info(metplus_config_files, lat, lon, expected_result): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_grid_info(metplus_config, lat, lon, expected_result): + wrapper = extract_tiles_wrapper(metplus_config) assert(wrapper.get_grid_info(lat, lon, 'FCST') == expected_result) @@ -186,7 +201,7 @@ def test_get_grid_info(metplus_config_files, lat, lon, expected_result): ] ) @pytest.mark.wrapper -def test_get_grid(metplus_config_files, lat, lon, expected_result): - wrapper = extract_tiles_wrapper(metplus_config_files) +def test_get_grid(metplus_config, lat, lon, expected_result): + wrapper = extract_tiles_wrapper(metplus_config) storm_data = {'ALAT': lat, 'ALON': lon} assert(wrapper.get_grid('FCST', storm_data) == expected_result) diff --git a/internal/tests/pytests/wrappers/pb2nc/conf1 b/internal/tests/pytests/wrappers/pb2nc/conf1 deleted file mode 100644 index a1c694fcdb..0000000000 --- a/internal/tests/pytests/wrappers/pb2nc/conf1 +++ /dev/null @@ -1,72 +0,0 @@ -[config] -## Configuration-related settings such as the process list, begin and end times, etc. -PROCESS_LIST = PB2NC - -## LOOP_ORDER -## Options are: processes, times -## Looping by time- runs all items in the PROCESS_LIST for each -## initialization time and repeats until all times have been evaluated. -## Looping by processes- run each item in the PROCESS_LIST for all -## specified initialization times then repeat for the next item in the -## PROCESS_LIST. 
-#LOOP_ORDER = processes - -# Logging levels: DEBUG, INFO, WARN, ERROR (most verbose is DEBUG) -#LOG_LEVEL = DEBUG - -## MET Configuration files for pb2nc -PB2NC_CONFIG_FILE = {PARM_BASE}/met_config/PB2NCConfig_wrapped - -PB2NC_SKIP_IF_OUTPUT_EXISTS = True - -#LOOP_BY = VALID -#VALID_TIME_FMT = %Y%m%d -#VALID_BEG = 20170601 -#VALID_END = 20170603 -#VALID_INCREMENT = 86400 - -#LEAD_SEQ = 0 - - -# For both pb2nc and point_stat, the obs_window dictionary: -#OBS_WINDOW_BEGIN = -2700 -#OBS_WINDOW_END = 2700 - -# Either conus_sfc or upper_air -PB2NC_VERTICAL_LOCATION = conus_sfc - -# -# PB2NC -# -# These are appended with PB2NC to differentiate the GRID, POLY, and MESSAGE_TYPE for point_stat. -PB2NC_GRID = -PB2NC_POLY = -PB2NC_STATION_ID = -PB2NC_MESSAGE_TYPE = - -# Leave empty to process all -PB2NC_OBS_BUFR_VAR_LIST = PMO, TOB, TDO, UOB, VOB, PWO, TOCC, D_RH - -#*********** -# ***NOTE*** -#*********** -# SET TIME_SUMMARY_FLAG to False. There is a bug in met-6.1. -## For defining the time periods for summarization -# False for no time summary, True otherwise -PB2NC_TIME_SUMMARY_FLAG = False -PB2NC_TIME_SUMMARY_BEG = 000000 ;; start time of time summary in HHMMSS format -PB2NC_TIME_SUMMARY_END = 235959 ;; end time of time summary in HHMMSS format -PB2NC_TIME_SUMMARY_VAR_NAMES = PMO,TOB,TDO,UOB,VOB,PWO,TOCC -PB2NC_TIME_SUMMARY_TYPES = min, max, range, mean, stdev, median, p80 ;; a list of the statistics to summarize - -# Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. 
-#MODEL_NAME = gfs -#OBS_NAME = nam - -[dir] -PB2NC_INPUT_DIR = {INPUT_BASE}/grid_to_obs/prepbufr/nam - -[filename_templates] -PB2NC_INPUT_TEMPLATE = t{da_init?fmt=%2H}z.prepbufr.tm{offset?fmt=%2H} - -PB2NC_OUTPUT_TEMPLATE = {valid?fmt=%Y%m%d}/nam.{valid?fmt=%Y%m%d%H}.nc \ No newline at end of file diff --git a/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py b/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py index 65af743f68..8096cc1e4c 100644 --- a/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py +++ b/internal/tests/pytests/wrappers/pb2nc/test_pb2nc_wrapper.py @@ -11,23 +11,20 @@ from metplus.util import do_string_sub -def pb2nc_wrapper(metplus_config_files): +def pb2nc_wrapper(metplus_config): """! Returns a default PB2NCWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration - to over-ride these /path/to values.""" - - # PB2NCWrapper with configuration values determined by what is set in - # the pb2nc_test.conf file. - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'conf1')) - config = metplus_config_files(extra_configs) + to over-ride these /path/to values. 
+ """ + config = metplus_config + config.set('config', 'PB2NC_INPUT_TEMPLATE', + 't{da_init?fmt=%2H}z.prepbufr.tm{offset?fmt=%2H}') return PB2NCWrapper(config) @pytest.mark.parametrize( - # key = grid_id, value = expected reformatted grid id - 'exists, skip, run', [ + 'exists, skip, run', [ (True, True, False), (True, False, True), (False, True, True), @@ -35,8 +32,8 @@ def pb2nc_wrapper(metplus_config_files): ] ) @pytest.mark.wrapper -def test_find_and_check_output_file_skip(metplus_config_files, exists, skip, run): - pb = pb2nc_wrapper(metplus_config_files) +def test_find_and_check_output_file_skip(metplus_config, exists, skip, run): + pb = pb2nc_wrapper(metplus_config) exist_file = 'wackyfilenametocreate' non_exist_file = 'wackyfilethatdoesntexist' @@ -67,16 +64,16 @@ def test_find_and_check_output_file_skip(metplus_config_files, exists, skip, run # --------------------- @pytest.mark.parametrize( # list of input files - 'infiles', [ - [], - ['file1'], - ['file1', 'file2'], - ['file1', 'file2', 'file3'], - ] + 'infiles', [ + [], + ['file1'], + ['file1', 'file2'], + ['file1', 'file2', 'file3'], + ] ) @pytest.mark.wrapper -def test_get_command(metplus_config_files, infiles): - pb = pb2nc_wrapper(metplus_config_files) +def test_get_command(metplus_config, infiles): + pb = pb2nc_wrapper(metplus_config) pb.outfile = 'outfilename.txt' pb.outdir = pb.config.getdir('OUTPUT_BASE') outpath = os.path.join(pb.outdir, pb.outfile) @@ -101,16 +98,16 @@ def test_get_command(metplus_config_files, infiles): @pytest.mark.parametrize( # offset = list of offsets to search # offset_to_find = expected offset file to find, None if no files should be found - 'offsets, offset_to_find', [ - ([6, 5, 4, 3], 5), - ([6, 4, 3], 3), - ([2, 3, 4, 5, 6], 3), - ([2, 4, 6], None), - ] + 'offsets, offset_to_find', [ + ([6, 5, 4, 3], 5), + ([6, 4, 3], 3), + ([2, 3, 4, 5, 6], 3), + ([2, 4, 6], None), + ] ) @pytest.mark.wrapper -def test_find_input_files(metplus_config_files, offsets, offset_to_find): - 
pb = pb2nc_wrapper(metplus_config_files) +def test_find_input_files(metplus_config, offsets, offset_to_find): + pb = pb2nc_wrapper(metplus_config) # for valid 20190201_12, offsets 3 and 5, create files to find # in the fake input directory based on input template input_dict = { 'valid' : datetime.datetime(2019, 2, 1, 12) } diff --git a/internal/tests/pytests/wrappers/pcp_combine/test1.conf b/internal/tests/pytests/wrappers/pcp_combine/test1.conf deleted file mode 100644 index 0d50280991..0000000000 --- a/internal/tests/pytests/wrappers/pcp_combine/test1.conf +++ /dev/null @@ -1,35 +0,0 @@ -[config] -FCST_PCP_COMBINE_INPUT_ACCUMS = 6 -FCST_PCP_COMBINE_INPUT_NAMES = P06M_NONE -FCST_PCP_COMBINE_INPUT_LEVELS = "(*,*)" - -OBS_PCP_COMBINE_INPUT_ACCUMS = 1 -OBS_PCP_COMBINE_INPUT_NAMES = P01M_NONE - -OBS_PCP_COMBINE_DATA_INTERVAL = 1 -OBS_PCP_COMBINE_TIMES_PER_FILE = 4 - -FCST_PCP_COMBINE_INPUT_DATATYPE = NETCDF -OBS_PCP_COMBINE_INPUT_DATATYPE = NETCDF - -FCST_PCP_COMBINE_RUN = True - -FCST_PCP_COMBINE_METHOD = ADD - -OBS_PCP_COMBINE_RUN = True - -OBS_PCP_COMBINE_METHOD = ADD - -[dir] -OBS_PCP_COMBINE_INPUT_DIR = {METPLUS_BASE}/internal/tests/data/accum -OBS_PCP_COMBINE_OUTPUT_DIR = {OUTPUT_BASE}/internal/tests/data/fakeout - -FCST_PCP_COMBINE_INPUT_DIR = {METPLUS_BASE}/internal/tests/data/fcst -FCST_PCP_COMBINE_OUTPUT_DIR = {OUTPUT_BASE}/internal/tests/data/fakeout - -[filename_templates] -OBS_PCP_COMBINE_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/file.{valid?fmt=%Y%m%d%H}.{level?fmt=%HH}h -OBS_PCP_COMBINE_OUTPUT_TEMPLATE = {valid?fmt=%Y%m%d}/outfile.{valid?fmt=%Y%m%d%H}_A{level?fmt=%HH}h -FCST_PCP_COMBINE_INPUT_TEMPLATE = {init?fmt=%Y%m%d}/file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc -FCST2_PCP_COMBINE_INPUT_TEMPLATE = file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc -FCST_PCP_COMBINE_OUTPUT_TEMPLATE = {valid?fmt=%Y%m%d}/file.{valid?fmt=%Y%m%d%H}_A{level?fmt=%HHH}.nc \ No newline at end of file diff --git a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py 
b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py index 6c2ba65523..54d1c2f605 100644 --- a/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py +++ b/internal/tests/pytests/wrappers/pcp_combine/test_pcp_combine_wrapper.py @@ -16,17 +16,40 @@ def get_test_data_dir(config, subdir=None): top_dir = os.path.join(top_dir, subdir) return top_dir -def pcp_combine_wrapper(metplus_config_files, d_type): + +def pcp_combine_wrapper(metplus_config, d_type): """! Returns a default PCPCombineWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - - # PCPCombineWrapper with configuration values determined by what is set in - # the test1.conf file. - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test1.conf')) - config = metplus_config_files(extra_configs) + config = metplus_config + config.set('config', 'FCST_PCP_COMBINE_INPUT_ACCUMS', '6') + config.set('config', 'FCST_PCP_COMBINE_INPUT_NAMES', 'P06M_NONE') + config.set('config', 'FCST_PCP_COMBINE_INPUT_LEVELS', '"(*,*)"') + config.set('config', 'OBS_PCP_COMBINE_INPUT_ACCUMS', '1') + config.set('config', 'OBS_PCP_COMBINE_INPUT_NAMES', 'P01M_NONE') + config.set('config', 'OBS_PCP_COMBINE_DATA_INTERVAL', '1') + config.set('config', 'OBS_PCP_COMBINE_TIMES_PER_FILE', '4') + config.set('config', 'FCST_PCP_COMBINE_METHOD', 'ADD') + config.set('config', 'OBS_PCP_COMBINE_METHOD', 'ADD') + config.set('config', 'OBS_PCP_COMBINE_INPUT_DIR', + '{METPLUS_BASE}/internal/tests/data/accum') + config.set('config', 'OBS_PCP_COMBINE_OUTPUT_DIR', + '{OUTPUT_BASE}/internal/tests/data/fakeout') + config.set('config', 'FCST_PCP_COMBINE_INPUT_DIR', + '{METPLUS_BASE}/internal/tests/data/fcst') + config.set('config', 'FCST_PCP_COMBINE_OUTPUT_DIR', + '{OUTPUT_BASE}/internal/tests/data/fakeout') + config.set('config', 
'OBS_PCP_COMBINE_INPUT_TEMPLATE', + '{valid?fmt=%Y%m%d}/file.{valid?fmt=%Y%m%d%H}.{level?fmt=%HH}h') + config.set('config', 'OBS_PCP_COMBINE_OUTPUT_TEMPLATE', + '{valid?fmt=%Y%m%d}/outfile.{valid?fmt=%Y%m%d%H}_A{level?fmt=%HH}h') + config.set('config', 'FCST_PCP_COMBINE_INPUT_TEMPLATE', + '{init?fmt=%Y%m%d}/file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc') + config.set('config', 'FCST2_PCP_COMBINE_INPUT_TEMPLATE', + 'file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc') + config.set('config', 'FCST_PCP_COMBINE_OUTPUT_TEMPLATE', + '{valid?fmt=%Y%m%d}/file.{valid?fmt=%Y%m%d%H}_A{level?fmt=%HHH}.nc') if d_type == "FCST": config.set('config', 'FCST_PCP_COMBINE_RUN', True) @@ -37,9 +60,9 @@ def pcp_combine_wrapper(metplus_config_files, d_type): @pytest.mark.wrapper -def test_get_accumulation_1_to_6(metplus_config_files): +def test_get_accumulation_1_to_6(metplus_config): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) input_dir = get_test_data_dir(pcw.config, subdir='accum') task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') @@ -62,9 +85,9 @@ def test_get_accumulation_1_to_6(metplus_config_files): @pytest.mark.wrapper -def test_get_accumulation_6_to_6(metplus_config_files): +def test_get_accumulation_6_to_6(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) input_dir = get_test_data_dir(pcw.config, subdir='accum') task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') @@ -84,9 +107,9 @@ def test_get_accumulation_6_to_6(metplus_config_files): @pytest.mark.wrapper -def test_get_lowest_forecast_file_dated_subdir(metplus_config_files): +def test_get_lowest_forecast_file_dated_subdir(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) input_dir = 
get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802012100", '%Y%m%d%H%M') pcw.c_dict[f'{data_src}_INPUT_DIR'] = input_dir @@ -97,9 +120,9 @@ def test_get_lowest_forecast_file_dated_subdir(metplus_config_files): @pytest.mark.wrapper -def test_forecast_constant_init(metplus_config_files): +def test_forecast_constant_init(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) pcw.c_dict['FCST_CONSTANT_INIT'] = True input_dir = get_test_data_dir(pcw.config, subdir='fcst') init_time = datetime.strptime("2018020112", '%Y%m%d%H') @@ -111,9 +134,9 @@ def test_forecast_constant_init(metplus_config_files): @pytest.mark.wrapper -def test_forecast_not_constant_init(metplus_config_files): +def test_forecast_not_constant_init(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) pcw.c_dict['FCST_CONSTANT_INIT'] = False input_dir = get_test_data_dir(pcw.config, subdir='fcst') init_time = datetime.strptime("2018020112", '%Y%m%d%H') @@ -126,9 +149,9 @@ def test_forecast_not_constant_init(metplus_config_files): @pytest.mark.wrapper -def test_get_lowest_forecast_file_no_subdir(metplus_config_files): +def test_get_lowest_forecast_file_no_subdir(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) input_dir = get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802012100", '%Y%m%d%H%M') template = "file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc" @@ -140,9 +163,9 @@ def test_get_lowest_forecast_file_no_subdir(metplus_config_files): @pytest.mark.wrapper -def test_get_lowest_forecast_file_yesterday(metplus_config_files): +def test_get_lowest_forecast_file_yesterday(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, 
data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) input_dir = get_test_data_dir(pcw.config, subdir='fcst') valid_time = datetime.strptime("201802010600", '%Y%m%d%H%M') template = "file.{init?fmt=%Y%m%d%H}f{lead?fmt=%HHH}.nc" @@ -154,9 +177,9 @@ def test_get_lowest_forecast_file_yesterday(metplus_config_files): @pytest.mark.wrapper -def test_setup_add_method(metplus_config_files): +def test_setup_add_method(metplus_config): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') time_info = ti_calculate(task_info) @@ -179,9 +202,9 @@ def test_setup_add_method(metplus_config_files): # how to test? check output? @pytest.mark.wrapper -def test_setup_sum_method(metplus_config_files): +def test_setup_sum_method(metplus_config): data_src = "OBS" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) task_info = {} task_info['valid'] = datetime.strptime("2016090418", '%Y%m%d%H') task_info['lead'] = 0 @@ -191,9 +214,9 @@ def test_setup_sum_method(metplus_config_files): @pytest.mark.wrapper -def test_setup_subtract_method(metplus_config_files): +def test_setup_subtract_method(metplus_config): data_src = "FCST" - pcw = pcp_combine_wrapper(metplus_config_files, data_src) + pcw = pcp_combine_wrapper(metplus_config, data_src) task_info = {} task_info['valid'] = datetime.strptime("201609050000", '%Y%m%d%H%M') task_info['lead_hours'] = 9 diff --git a/internal/tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf b/internal/tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf deleted file mode 100644 index 8c574d2e5f..0000000000 --- a/internal/tests/pytests/wrappers/tc_pairs/tc_pairs_wrapper_test.conf +++ /dev/null @@ -1,105 +0,0 @@ -# -# CONFIGURATION -# -[config] -LOOP_METHOD = processes -# Configuration files -TC_PAIRS_CONFIG_FILE = 
{PARM_BASE}/met_config/TCPairsConfig_wrapped - -PROCESS_LIST = TCPairs - -# The init time begin and end times, increment, and last init hour. -INIT_TIME_FMT = %Y%m%d -INIT_BEG = 20141201 -INIT_END = 20141231 -INIT_INCREMENT = 21600 ;; set to every 6 hours=21600 seconds -TC_PAIRS_INIT_INCLUDE = -TC_PAIRS_INIT_EXCLUDE = - -TC_PAIRS_VALID_BEG = -TC_PAIRS_VALID_END = - -TC_PAIRS_READ_ALL_FILES = no - -# set to true or yes to reformat track data into ATCF format expected by tc_pairs -TC_PAIRS_REFORMAT_DECK = yes -TC_PAIRS_REFORMAT_TYPE = SBU - - -# TC PAIRS filtering options -TC_PAIRS_MISSING_VAL_TO_REPLACE = -99 -TC_PAIRS_MISSING_VAL = -9999 - - -# OVERWRITE OPTIONS -# Don't overwrite filter files if they already exist. -# Set to no if you do NOT want to override existing files -# Set to yes if you do want to override existing files -#OVERWRITE_TRACK = yes -TC_PAIRS_SKIP_IF_REFORMAT_EXISTS = no -TC_PAIRS_SKIP_IF_OUTPUT_EXISTS = no - -# List of models to be used (white space or comma separated) eg: DSHP, LGEM, HWRF -# If no models are listed, then process all models in the input file(s). -MODEL = - -# List of storm ids of interest (space or comma separated) e.g.: AL112012, AL122012 -# If no storm ids are listed, then process all storm ids in the input file(s). -TC_PAIRS_STORM_ID = - -# Basins (of origin/region). Indicate with space or comma-separated list of regions, eg. AL: for North Atlantic, -# WP: Western North Pacific, CP: Central North Pacific, SH: Southern Hemisphere, IO: North Indian Ocean, LS: Southern -# Hemisphere -TC_PAIRS_BASIN = - -# Cyclone, a space or comma-separated list of cyclone numbers. If left empty, all cyclones will be used. -TC_PAIRS_CYCLONE = - -# Storm name, a space or comma-separated list of storm names to evaluate. If left empty, all storms will be used. -TC_PAIRS_STORM_NAME = - -# DLAND file, the full path of the file that contains the gridded representation of the -# minimum distance from land. 
-TC_PAIRS_DLAND_FILE = MET_BASE/tc_data/dland_global_tenth_degree.nc - - -# -# FILENAME TEMPLATES -# -[filename_templates] -# We DO NOT want to interpret time info or expand{} these values. -# Use, getraw('filename_templates','FCST_EXTRACT_TILES_INPUT_TEMPLATE') to get -# 'gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%HHH}.grb2' -# FCST_EXTRACT_TILES_INPUT_TEMPLATE = gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%HHH}.grb2 -# GFS_FCST_NC_FILE_TMPL = gfs_4_{init?fmt=%Y%m%d}_{init?fmt=%H}00_{lead?fmt=%HHH}.nc -# OBS_EXTRACT_TILES_INPUT_TEMPLATE = gfs_4_{valid?fmt=%Y%m%d}_{valid?fmt=%H}00_000.grb2 -# GFS_ANLY_NC_FILE_TMPL = gfs_4_{valid?fmt=%Y%m%d}_{valid?fmt=%H}00_000.nc - -TC_PAIRS_ADECK_TEMPLATE = {date?fmt=%Y%m}/a{basin?fmt=%s}q{date?fmt=%Y%m}*.gfso.{cyclone?fmt=%s} -TC_PAIRS_BDECK_TEMPLATE = {date?fmt=%Y%m}/b{basin?fmt=%s}q{date?fmt=%Y%m}*.gfso.{cyclone?fmt=%s} -TC_PAIRS_OUTPUT_TEMPLATE = {date?fmt=%Y%m}/{basin?fmt=%s}q{date?fmt=%Y%m%d%H}.gfso.{cyclone?fmt=%s} - -# -# DIRECTORIES -# -[dir] - -# Location of your model data of interest -#EXTRACT_TILES_GRID_INPUT_DIR = {METPLUS_BASE}/sample_data/GFS/reduced_model_data -#EXTRACT_TILES_GRID_INPUT_DIR = /d1/SBU/GFS/reduced_model_data -# Commonly used base METplus variables - -# track data, set to your data source -TC_PAIRS_ADECK_INPUT_DIR = {INPUT_BASE}/met_test/new/track_data -TC_PAIRS_BDECK_INPUT_DIR = {INPUT_BASE}/met_test/new/track_data - - -#TRACK_DATA_DIR = {METPLUS_BASE}/sample_data/GFS/track_data -#TC_PAIRS_ADECK_INPUT_DIR = /d1/SBU/GFS/track_data -#TC_PAIRS_ADECK_INPUT_DIR = /d1/METplus_TC/adeck -#TC_PAIRS_BDECK_INPUT_DIR = /d1/SBU/GFS/track_data -#TC_PAIRS_BDECK_INPUT_DIR = /d1/METplus_TC/bdeck -TC_PAIRS_REFORMAT_DIR = {OUTPUT_BASE}/track_data_atcf -#TRACK_DATA_SUBDIR_MOD = {PROJ_DIR}/track_data_atcf -TC_PAIRS_OUTPUT_DIR = {OUTPUT_BASE}/tc_pairs - diff --git a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py index 
6ff2364a53..7b819cd660 100644 --- a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py @@ -384,12 +384,14 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, ] ) @pytest.mark.wrapper -def test_tc_pairs_loop_order_processes(metplus_config_files, config_overrides, +def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, env_var_values): + my_config = metplus_config # run using init and valid time variables for loop_by in ['INIT', 'VALID']: remove_beg = remove_end = remove_match_points = False - config = metplus_config_files([]) + #config = metplus_config_files([]) + config = my_config.copy() set_minimum_config_settings(config, loop_by) From f1a6892cd414554756a4e1461b1809671d192d73 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Fri, 21 Oct 2022 16:42:40 -0600 Subject: [PATCH 90/92] allow now and today keywords in string template substitution for StatAnalysis --- metplus/wrappers/stat_analysis_wrapper.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index 8aa5396659..4696d7d424 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -440,7 +440,13 @@ def build_stringsub_dict(self, config_dict): """ date_type = self.c_dict['DATE_TYPE'] - stringsub_dict = {} + clock_dt = datetime.strptime( + self.config.getstr('config', 'CLOCK_TIME'), '%Y%m%d%H%M%S' + ) + stringsub_dict = { + 'now': clock_dt, + 'today': clock_dt.strftime('%Y%m%d') + } # add all loop list and group list items to string sub keys list for list_item in self.EXPECTED_CONFIG_LISTS: list_name = list_item.replace('_LIST', '').lower() From 61c6d12d26f324878df378069b440af8a5b730d5 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: 
Fri, 21 Oct 2022 17:47:50 -0600 Subject: [PATCH 91/92] updated tests to avoid using config files so they can be auto-scrubbed if the test passes, scrub files that must be generated using the old method, add documentation to pytest fixtures --- internal/tests/pytests/conftest.py | 62 +-- .../tests/pytests/util/config/test_config.py | 12 +- .../stat_analysis/test_stat_analysis.py | 37 +- .../tc_pairs/test_tc_pairs_wrapper.py | 416 +++++++++++------- .../wrappers/tc_stat/tc_stat_conf.conf | 173 -------- .../wrappers/tc_stat/test_tc_stat_wrapper.py | 45 +- 6 files changed, 356 insertions(+), 389 deletions(-) delete mode 100755 internal/tests/pytests/wrappers/tc_stat/tc_stat_conf.conf diff --git a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index d7084af9fa..1f431b9b79 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -57,31 +57,11 @@ shutil.rmtree(test_output_dir) -@pytest.fixture(scope='function') -def metplus_config_files(): - """! Create a METplus configuration object that can be - manipulated/modified to - reflect different paths, directories, values, etc. for individual - tests. - """ - def read_configs(extra_configs): - # Read in minimum pytest config file and any other extra configs - script_dir = os.path.dirname(__file__) - minimum_conf = os.path.join(script_dir, 'minimum_pytest.conf') - args = [minimum_conf] - for extra_config in extra_configs: - if extra_config.startswith('use_cases'): - args.append(os.path.join(metplus_dir, 'parm', extra_config)) - elif extra_config: - args.append(extra_config) - - config = config_metplus.setup(args) - return config - - return read_configs - @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): + """! This is used to capture the status of a test so the metplus_config + fixture can remove output data from tests that pass. 
+ """ # execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() @@ -91,13 +71,16 @@ def pytest_runtest_makereport(item, call): setattr(item, "rep_" + rep.when, rep) -#@pytest.fixture(scope='function') + @pytest.fixture() def metplus_config(request): - """! Create a METplus configuration object that can be - manipulated/modified to - reflect different paths, directories, values, etc. for individual - tests. + """! Create a METplus configuration object using only the minimum required + settings found in minimum_pytest.conf. This fixture checks the result of + the test it is used in and automatically removes the output that is + generated by it unless the test fails. This makes it much easier to review + the failed tests. To use this fixture, add metplus_config to the test + function arguments and set a variable called config to metplus_config, e.g. + config = metplus_config. """ script_dir = os.path.dirname(__file__) args = [os.path.join(script_dir, 'minimum_pytest.conf')] @@ -110,3 +93,26 @@ def metplus_config(request): config_output_base = config.getdir('OUTPUT_BASE') if config_output_base and os.path.exists(config_output_base): shutil.rmtree(config_output_base) + + +@pytest.fixture(scope='function') +def metplus_config_files(): + """! Create a METplus configuration object using minimum_pytest.conf + settings and any list of config files.The metplus_config fixture is + preferred because it automatically cleans up the output files generated + by the use case unless the test fails. To use this in a test, add + metplus_config_files as an argument to the test function and pass in a list + of config files to it. 
Example: config = metplus_config_files([my_file]) + """ + def read_configs(extra_configs): + # Read in minimum pytest config file and any other extra configs + script_dir = os.path.dirname(__file__) + minimum_conf = os.path.join(script_dir, 'minimum_pytest.conf') + args = [] + for extra_config in extra_configs: + args.append(extra_config) + args.append(minimum_conf) + config = config_metplus.setup(args) + return config + + return read_configs diff --git a/internal/tests/pytests/util/config/test_config.py b/internal/tests/pytests/util/config/test_config.py index 2d0a927f49..0465bc62be 100644 --- a/internal/tests/pytests/util/config/test_config.py +++ b/internal/tests/pytests/util/config/test_config.py @@ -4,7 +4,7 @@ import os from configparser import NoOptionError -from shutil import which +from shutil import which, rmtree from metplus.util import met_util as util @@ -251,6 +251,9 @@ def test_move_all_to_config_section(metplus_config_files, config_key, config_files = [os.path.join(test_dir, item) for item in config_files] config = metplus_config_files(config_files) assert config.getstr('config', config_key) == expected_result + output_base = config.getdir('OUTPUT_BASE') + if output_base and os.path.exists(output_base): + rmtree(output_base) @pytest.mark.parametrize( @@ -286,6 +289,9 @@ def test_move_all_to_config_section_cmd_line(metplus_config_files, overrides, config = metplus_config_files(overrides) assert config.getstr('config', config_key, '') == expected_result + output_base = config.getdir('OUTPUT_BASE') + if output_base and os.path.exists(output_base): + rmtree(output_base) @pytest.mark.parametrize( 'config_name, expected_result', [ @@ -341,3 +347,7 @@ def test_getraw_nested_curly_braces(metplus_config_files, config = metplus_config_files(config_files) sec, name = config_name.split('.', 1) assert config.getraw(sec, name) == expected_result + + output_base = config.getdir('OUTPUT_BASE') + if output_base and os.path.exists(output_base): + rmtree(output_base) 
diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index be9d01c3ff..4ba245226c 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -16,17 +16,42 @@ pp = pprint.PrettyPrinter() -def stat_analysis_wrapper(metplus_config_files): + +def stat_analysis_wrapper(metplus_config): """! Returns a default StatAnalysisWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration to over-ride these /path/to values.""" - - # Default, empty StatAnalysisWrapper with some configuration values set - # to /path/to: - extra_configs = [TEST_CONF] - config = metplus_config_files(extra_configs) + config = metplus_config handle_tmp_dir(config) + config.set('config', 'PROCESS_LIST', 'StatAnalysis') + config.set('config', 'STAT_ANALYSIS_OUTPUT_DIR', + '{OUTPUT_BASE}/stat_analysis') + config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', + '{METPLUS_BASE}/internal/tests/data/stat_data') + config.set('config', 'LOOP_BY', 'VALID') + config.set('config', 'VALID_TIME_FMT', '%Y%m%d') + config.set('config', 'VALID_BEG', '20190101') + config.set('config', 'VALID_END', '20190101') + config.set('config', 'VALID_INCREMENT', '86400') + config.set('config', 'MODEL1', 'MODEL_TEST') + config.set('config', 'MODEL1_REFERENCE_NAME', 'MODELTEST') + config.set('config', 'MODEL1_OBTYPE', 'MODEL_TEST_ANL') + config.set('config', 'STAT_ANALYSIS_CONFIG_FILE', + '{PARM_BASE}/met_config/STATAnalysisConfig_wrapped') + config.set('config', 'STAT_ANALYSIS_JOB_NAME', 'filter') + config.set('config', 'STAT_ANALYSIS_JOB_ARGS', '-dump_row [dump_row_file]') + config.set('config', 'MODEL_LIST', '{MODEL1}') + config.set('config', 'FCST_VALID_HOUR_LIST', '00') + config.set('config', 'FCST_INIT_HOUR_LIST', '00, 06, 12, 
18') + config.set('config', 'GROUP_LIST_ITEMS', 'FCST_INIT_HOUR_LIST') + config.set('config', 'LOOP_LIST_ITEMS', 'FCST_VALID_HOUR_LIST, MODEL_LIST') + config.set('config', 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE', + ('{fcst_valid_hour?fmt=%H}Z/{MODEL1}/' + '{MODEL1}_{valid?fmt=%Y%m%d}.stat')) + config.set('config', 'MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE', + ('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}' + '{valid_hour?fmt=%H}_init{fcst_init_hour?fmt=%s}.stat')) return StatAnalysisWrapper(config) diff --git a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py index 7b819cd660..b7ad438f2a 100644 --- a/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_pairs/test_tc_pairs_wrapper.py @@ -297,47 +297,48 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, @pytest.mark.parametrize( - 'config_overrides, env_var_values', [ + 'loop_by, config_overrides, env_var_values', [ + # LOOP_BY = INIT # 0: no config overrides that set env vars - ({}, {}), + ('INIT', {}, {}), # 1: description - ({'TC_PAIRS_DESC': 'my_desc'}, + ('INIT', {'TC_PAIRS_DESC': 'my_desc'}, {'METPLUS_DESC': 'desc = "my_desc";'}), # 2: only basin that corresponds to existing test file is used - ({'TC_PAIRS_BASIN': 'AL, ML'}, + ('INIT', {'TC_PAIRS_BASIN': 'AL, ML'}, {'METPLUS_BASIN': 'basin = ["ML"];'}), # 3: only cyclone that corresponds to existing test file is used - ({'TC_PAIRS_CYCLONE': '1005, 0104'}, + ('INIT', {'TC_PAIRS_CYCLONE': '1005, 0104'}, {'METPLUS_CYCLONE': 'cyclone = ["0104"];'}), # 4: model list - ({'MODEL': 'MOD1, MOD2'}, + ('INIT', {'MODEL': 'MOD1, MOD2'}, {'METPLUS_MODEL': 'model = ["MOD1", "MOD2"];'}), # 5: init begin - ({'TC_PAIRS_INIT_BEG': '20141031_14'}, + ('INIT', {'TC_PAIRS_INIT_BEG': '20141031_14'}, {'METPLUS_INIT_BEG': 'init_beg = "20141031_14";'}), # 6: init end - ({'TC_PAIRS_INIT_END': '20151031_14'}, + ('INIT', 
{'TC_PAIRS_INIT_END': '20151031_14'}, {'METPLUS_INIT_END': 'init_end = "20151031_14";'}), # 7: dland file - ({'TC_PAIRS_DLAND_FILE': 'my_dland.nc'}, + ('INIT', {'TC_PAIRS_DLAND_FILE': 'my_dland.nc'}, {'METPLUS_DLAND_FILE': 'dland_file = "my_dland.nc";'}), # 8: init_exc - ({'TC_PAIRS_INIT_EXCLUDE': '20141031_14'}, + ('INIT', {'TC_PAIRS_INIT_EXCLUDE': '20141031_14'}, {'METPLUS_INIT_EXC': 'init_exc = ["20141031_14"];'}), # 9: init_inc - ({'TC_PAIRS_INIT_INCLUDE': '20141031_14'}, + ('INIT', {'TC_PAIRS_INIT_INCLUDE': '20141031_14'}, {'METPLUS_INIT_INC': 'init_inc = ["20141031_14"];'}), # 10: storm name - ({'TC_PAIRS_STORM_NAME': 'KATRINA, OTHER'}, + ('INIT', {'TC_PAIRS_STORM_NAME': 'KATRINA, OTHER'}, {'METPLUS_STORM_NAME': 'storm_name = ["KATRINA", "OTHER"];'}), # 11: valid begin - ({'TC_PAIRS_VALID_BEG': '20141031_14'}, + ('INIT', {'TC_PAIRS_VALID_BEG': '20141031_14'}, {'METPLUS_VALID_BEG': 'valid_beg = "20141031_14";'}), # 12: valid end - ({'TC_PAIRS_VALID_END': '20141031_14'}, + ('INIT', {'TC_PAIRS_VALID_END': '20141031_14'}, {'METPLUS_VALID_END': 'valid_end = "20141031_14";'}), # 13: consensus 1 dictionary - ({'TC_PAIRS_CONSENSUS1_NAME': 'name1', + ('INIT', {'TC_PAIRS_CONSENSUS1_NAME': 'name1', 'TC_PAIRS_CONSENSUS1_MEMBERS': 'member1a, member1b', 'TC_PAIRS_CONSENSUS1_REQUIRED': 'true, false', 'TC_PAIRS_CONSENSUS1_MIN_REQ': '1'}, @@ -346,7 +347,7 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, 'required = [true, false];min_req = 1;}];' )}), # 14: consensus 2 dictionaries - ({'TC_PAIRS_CONSENSUS1_NAME': 'name1', + ('INIT', {'TC_PAIRS_CONSENSUS1_NAME': 'name1', 'TC_PAIRS_CONSENSUS1_MEMBERS': 'member1a, member1b', 'TC_PAIRS_CONSENSUS1_REQUIRED': 'true, false', 'TC_PAIRS_CONSENSUS1_MIN_REQ': '1', @@ -363,193 +364,276 @@ def test_tc_pairs_storm_id_lists(metplus_config, config_overrides, 'required = [false, true];min_req = 2;}];' )}), # 15: valid_exc - ({'TC_PAIRS_VALID_EXCLUDE': '20141031_14'}, + ('INIT', {'TC_PAIRS_VALID_EXCLUDE': '20141031_14'}, 
{'METPLUS_VALID_EXC': 'valid_exc = ["20141031_14"];'}), # 16: valid_inc - ({'TC_PAIRS_VALID_INCLUDE': '20141031_14'}, + ('INIT', {'TC_PAIRS_VALID_INCLUDE': '20141031_14'}, {'METPLUS_VALID_INC': 'valid_inc = ["20141031_14"];'}), # 17: write_valid - ({'TC_PAIRS_WRITE_VALID': '20141031_14'}, + ('INIT', {'TC_PAIRS_WRITE_VALID': '20141031_14'}, {'METPLUS_WRITE_VALID': 'write_valid = ["20141031_14"];'}), # 18: check_dup - ({'TC_PAIRS_CHECK_DUP': 'False', }, + ('INIT', {'TC_PAIRS_CHECK_DUP': 'False', }, {'METPLUS_CHECK_DUP': 'check_dup = FALSE;'}), # 19: interp12 - ({'TC_PAIRS_INTERP12': 'replace', }, + ('INIT', {'TC_PAIRS_INTERP12': 'replace', }, {'METPLUS_INTERP12': 'interp12 = REPLACE;'}), # 20 match_points - ({'TC_PAIRS_MATCH_POINTS': 'False', }, + ('INIT', {'TC_PAIRS_MATCH_POINTS': 'False', }, + {'METPLUS_MATCH_POINTS': 'match_points = FALSE;'}), + # LOOP_BY = VALID + # 21: no config overrides that set env vars + ('VALID', {}, {}), + # 22: description + ('VALID', {'TC_PAIRS_DESC': 'my_desc'}, + {'METPLUS_DESC': 'desc = "my_desc";'}), + # 23: only basin that corresponds to existing test file is used + ('VALID', {'TC_PAIRS_BASIN': 'AL, ML'}, + {'METPLUS_BASIN': 'basin = ["ML"];'}), + # 24: only cyclone that corresponds to existing test file is used + ('VALID', {'TC_PAIRS_CYCLONE': '1005, 0104'}, + {'METPLUS_CYCLONE': 'cyclone = ["0104"];'}), + # 25: model list + ('VALID', {'MODEL': 'MOD1, MOD2'}, + {'METPLUS_MODEL': 'model = ["MOD1", "MOD2"];'}), + # 26: init begin + ('VALID', {'TC_PAIRS_INIT_BEG': '20141031_14'}, + {'METPLUS_INIT_BEG': 'init_beg = "20141031_14";'}), + # 27: init end + ('VALID', {'TC_PAIRS_INIT_END': '20151031_14'}, + {'METPLUS_INIT_END': 'init_end = "20151031_14";'}), + # 28: dland file + ('VALID', {'TC_PAIRS_DLAND_FILE': 'my_dland.nc'}, + {'METPLUS_DLAND_FILE': 'dland_file = "my_dland.nc";'}), + # 29: init_exc + ('VALID', {'TC_PAIRS_INIT_EXCLUDE': '20141031_14'}, + {'METPLUS_INIT_EXC': 'init_exc = ["20141031_14"];'}), + # 30: init_inc + ('VALID', 
{'TC_PAIRS_INIT_INCLUDE': '20141031_14'}, + {'METPLUS_INIT_INC': 'init_inc = ["20141031_14"];'}), + # 31: storm name + ('VALID', {'TC_PAIRS_STORM_NAME': 'KATRINA, OTHER'}, + {'METPLUS_STORM_NAME': 'storm_name = ["KATRINA", "OTHER"];'}), + # 32: valid begin + ('VALID', {'TC_PAIRS_VALID_BEG': '20141031_14'}, + {'METPLUS_VALID_BEG': 'valid_beg = "20141031_14";'}), + # 33: valid end + ('VALID', {'TC_PAIRS_VALID_END': '20141031_14'}, + {'METPLUS_VALID_END': 'valid_end = "20141031_14";'}), + # 34: consensus 1 dictionary + ('VALID', {'TC_PAIRS_CONSENSUS1_NAME': 'name1', + 'TC_PAIRS_CONSENSUS1_MEMBERS': 'member1a, member1b', + 'TC_PAIRS_CONSENSUS1_REQUIRED': 'true, false', + 'TC_PAIRS_CONSENSUS1_MIN_REQ': '1'}, + {'METPLUS_CONSENSUS_LIST': ( + 'consensus = [{name = "name1";members = ["member1a", "member1b"];' + 'required = [true, false];min_req = 1;}];' + )}), + # 35: consensus 2 dictionaries + ('VALID', {'TC_PAIRS_CONSENSUS1_NAME': 'name1', + 'TC_PAIRS_CONSENSUS1_MEMBERS': 'member1a, member1b', + 'TC_PAIRS_CONSENSUS1_REQUIRED': 'true, false', + 'TC_PAIRS_CONSENSUS1_MIN_REQ': '1', + 'TC_PAIRS_CONSENSUS2_NAME': 'name2', + 'TC_PAIRS_CONSENSUS2_MEMBERS': 'member2a, member2b', + 'TC_PAIRS_CONSENSUS2_REQUIRED': 'false, true', + 'TC_PAIRS_CONSENSUS2_MIN_REQ': '2' + }, + {'METPLUS_CONSENSUS_LIST': ( + 'consensus = [' + '{name = "name1";members = ["member1a", "member1b"];' + 'required = [true, false];min_req = 1;}' + '{name = "name2";members = ["member2a", "member2b"];' + 'required = [false, true];min_req = 2;}];' + )}), + # 36: valid_exc + ('VALID', {'TC_PAIRS_VALID_EXCLUDE': '20141031_14'}, + {'METPLUS_VALID_EXC': 'valid_exc = ["20141031_14"];'}), + # 37: valid_inc + ('VALID', {'TC_PAIRS_VALID_INCLUDE': '20141031_14'}, + {'METPLUS_VALID_INC': 'valid_inc = ["20141031_14"];'}), + # 38: write_valid + ('VALID', {'TC_PAIRS_WRITE_VALID': '20141031_14'}, + {'METPLUS_WRITE_VALID': 'write_valid = ["20141031_14"];'}), + # 39: check_dup + ('VALID', {'TC_PAIRS_CHECK_DUP': 'False', }, + 
{'METPLUS_CHECK_DUP': 'check_dup = FALSE;'}), + # 40: interp12 + ('VALID', {'TC_PAIRS_INTERP12': 'replace', }, + {'METPLUS_INTERP12': 'interp12 = REPLACE;'}), + # 41 match_points + ('VALID', {'TC_PAIRS_MATCH_POINTS': 'False', }, {'METPLUS_MATCH_POINTS': 'match_points = FALSE;'}), - ] ) @pytest.mark.wrapper -def test_tc_pairs_loop_order_processes(metplus_config, config_overrides, - env_var_values): - my_config = metplus_config - # run using init and valid time variables - for loop_by in ['INIT', 'VALID']: - remove_beg = remove_end = remove_match_points = False - #config = metplus_config_files([]) - config = my_config.copy() - - set_minimum_config_settings(config, loop_by) - - test_data_dir = get_data_dir(config) - bdeck_dir = os.path.join(test_data_dir, 'bdeck') - adeck_dir = os.path.join(test_data_dir, 'adeck') - - config.set('config', 'TC_PAIRS_BDECK_INPUT_DIR', bdeck_dir) - config.set('config', 'TC_PAIRS_ADECK_INPUT_DIR', adeck_dir) - - # LOOP_ORDER processes runs once, times runs once per time - config.set('config', 'LOOP_ORDER', 'processes') - - # set config variable overrides - for key, value in config_overrides.items(): - config.set('config', key, value) - - if f'METPLUS_{loop_by}_BEG' not in env_var_values: - env_var_values[f'METPLUS_{loop_by}_BEG'] = ( - f'{loop_by.lower()}_beg = "{run_times[0]}";' - ) - remove_beg = True - - if f'METPLUS_{loop_by}_END' not in env_var_values: - env_var_values[f'METPLUS_{loop_by}_END'] = ( - f'{loop_by.lower()}_end = "{run_times[-1]}";' - ) - remove_end = True - - if f'METPLUS_MATCH_POINTS' not in env_var_values: - env_var_values[f'METPLUS_MATCH_POINTS'] = ( - 'match_points = TRUE;' - ) - remove_match_points = True - - wrapper = TCPairsWrapper(config) - assert wrapper.isOK - - app_path = os.path.join(config.getdir('MET_BIN_DIR'), wrapper.app_name) - verbosity = f"-v {wrapper.c_dict['VERBOSITY']}" - config_file = wrapper.c_dict.get('CONFIG_FILE') - out_dir = wrapper.c_dict.get('OUTPUT_DIR') - expected_cmds = [(f"{app_path} 
{verbosity} " - f"-bdeck {bdeck_dir}/bmlq2014123118.gfso.0104 " - f"-adeck {adeck_dir}/amlq2014123118.gfso.0104 " - f"-config {config_file} " - f"-out {out_dir}/mlq2014121318.gfso.0104"), - ] - - all_cmds = wrapper.run_all_times() - print(f"ALL COMMANDS: {all_cmds}") - assert len(all_cmds) == len(expected_cmds) - - for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): - # ensure commands are generated as expected - assert cmd == expected_cmd - - # check that environment variables were set properly - for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: - match = next((item for item in env_vars if - item.startswith(env_var_key)), None) - assert match is not None - print(f'Checking env var: {env_var_key}') - actual_value = match.split('=', 1)[1] - assert env_var_values.get(env_var_key, '') == actual_value - - if remove_beg: - del env_var_values[f'METPLUS_{loop_by}_BEG'] - if remove_end: - del env_var_values[f'METPLUS_{loop_by}_END'] - if remove_match_points: - del env_var_values['METPLUS_MATCH_POINTS'] +def test_tc_pairs_loop_order_processes(metplus_config, loop_by, + config_overrides, env_var_values): + config = metplus_config + remove_beg = remove_end = remove_match_points = False + + set_minimum_config_settings(config, loop_by) + + test_data_dir = get_data_dir(config) + bdeck_dir = os.path.join(test_data_dir, 'bdeck') + adeck_dir = os.path.join(test_data_dir, 'adeck') + + config.set('config', 'TC_PAIRS_BDECK_INPUT_DIR', bdeck_dir) + config.set('config', 'TC_PAIRS_ADECK_INPUT_DIR', adeck_dir) + + # LOOP_ORDER processes runs once, times runs once per time + config.set('config', 'LOOP_ORDER', 'processes') + + # set config variable overrides + for key, value in config_overrides.items(): + config.set('config', key, value) + + if f'METPLUS_{loop_by}_BEG' not in env_var_values: + env_var_values[f'METPLUS_{loop_by}_BEG'] = ( + f'{loop_by.lower()}_beg = "{run_times[0]}";' + ) + remove_beg = True + + if f'METPLUS_{loop_by}_END' not in env_var_values: + 
env_var_values[f'METPLUS_{loop_by}_END'] = ( + f'{loop_by.lower()}_end = "{run_times[-1]}";' + ) + remove_end = True + + if f'METPLUS_MATCH_POINTS' not in env_var_values: + env_var_values[f'METPLUS_MATCH_POINTS'] = ( + 'match_points = TRUE;' + ) + remove_match_points = True + + wrapper = TCPairsWrapper(config) + assert wrapper.isOK + + app_path = os.path.join(config.getdir('MET_BIN_DIR'), wrapper.app_name) + verbosity = f"-v {wrapper.c_dict['VERBOSITY']}" + config_file = wrapper.c_dict.get('CONFIG_FILE') + out_dir = wrapper.c_dict.get('OUTPUT_DIR') + expected_cmds = [(f"{app_path} {verbosity} " + f"-bdeck {bdeck_dir}/bmlq2014123118.gfso.0104 " + f"-adeck {adeck_dir}/amlq2014123118.gfso.0104 " + f"-config {config_file} " + f"-out {out_dir}/mlq2014121318.gfso.0104"), + ] + + all_cmds = wrapper.run_all_times() + print(f"ALL COMMANDS: {all_cmds}") + assert len(all_cmds) == len(expected_cmds) + + for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): + # ensure commands are generated as expected + assert cmd == expected_cmd + + # check that environment variables were set properly + for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: + match = next((item for item in env_vars if + item.startswith(env_var_key)), None) + assert match is not None + print(f'Checking env var: {env_var_key}') + actual_value = match.split('=', 1)[1] + assert env_var_values.get(env_var_key, '') == actual_value + + if remove_beg: + del env_var_values[f'METPLUS_{loop_by}_BEG'] + if remove_end: + del env_var_values[f'METPLUS_{loop_by}_END'] + if remove_match_points: + del env_var_values['METPLUS_MATCH_POINTS'] @pytest.mark.parametrize( - 'config_overrides, env_var_values', [ - # 0: no config overrides that set env vars - ({}, {}), + 'loop_by, config_overrides, env_var_values', [ + # 0: no config overrides that set env vars loop by = INIT + ('INIT', {}, {}), # 1: storm_id list - ({'TC_PAIRS_STORM_ID': 'AL092014, ML082015'}, + ('INIT', {'TC_PAIRS_STORM_ID': 'AL092014, ML082015'}, 
{'METPLUS_STORM_ID': 'storm_id = ["AL092014", "ML082015"];'}), # 2: basin list - ({'TC_PAIRS_BASIN': 'AL, ML'}, + ('INIT', {'TC_PAIRS_BASIN': 'AL, ML'}, {'METPLUS_BASIN': 'basin = ["AL", "ML"];'}), # 3: cyclone list - ({'TC_PAIRS_CYCLONE': '1005, 0104'}, + ('INIT', {'TC_PAIRS_CYCLONE': '1005, 0104'}, + {'METPLUS_CYCLONE': 'cyclone = ["1005", "0104"];'}), + # 4: no config overrides that set env vars loop by = VALID + ('VALID', {}, {}), + # 5: storm_id list + ('VALID', {'TC_PAIRS_STORM_ID': 'AL092014, ML082015'}, + {'METPLUS_STORM_ID': 'storm_id = ["AL092014", "ML082015"];'}), + # 6: basin list + ('VALID', {'TC_PAIRS_BASIN': 'AL, ML'}, + {'METPLUS_BASIN': 'basin = ["AL", "ML"];'}), + # 7: cyclone list + ('VALID', {'TC_PAIRS_CYCLONE': '1005, 0104'}, {'METPLUS_CYCLONE': 'cyclone = ["1005", "0104"];'}), ] ) @pytest.mark.wrapper -def test_tc_pairs_read_all_files(metplus_config_files, config_overrides, +def test_tc_pairs_read_all_files(metplus_config, loop_by, config_overrides, env_var_values): - # run using init and valid time variables - for loop_by in ['INIT', 'VALID']: - config = metplus_config_files([]) + config = metplus_config - set_minimum_config_settings(config, loop_by) + set_minimum_config_settings(config, loop_by) - test_data_dir = get_data_dir(config) - bdeck_dir = os.path.join(test_data_dir, 'bdeck') - adeck_dir = os.path.join(test_data_dir, 'adeck') + test_data_dir = get_data_dir(config) + bdeck_dir = os.path.join(test_data_dir, 'bdeck') + adeck_dir = os.path.join(test_data_dir, 'adeck') - config.set('config', 'TC_PAIRS_BDECK_INPUT_DIR', bdeck_dir) - config.set('config', 'TC_PAIRS_ADECK_INPUT_DIR', adeck_dir) + config.set('config', 'TC_PAIRS_BDECK_INPUT_DIR', bdeck_dir) + config.set('config', 'TC_PAIRS_ADECK_INPUT_DIR', adeck_dir) - # LOOP_ORDER processes runs once, times runs once per time - config.set('config', 'LOOP_ORDER', 'processes') + # LOOP_ORDER processes runs once, times runs once per time + config.set('config', 'LOOP_ORDER', 'processes') - 
config.set('config', 'TC_PAIRS_READ_ALL_FILES', True) - config.set('config', 'TC_PAIRS_OUTPUT_TEMPLATE', '') + config.set('config', 'TC_PAIRS_READ_ALL_FILES', True) + config.set('config', 'TC_PAIRS_OUTPUT_TEMPLATE', '') - # set config variable overrides - for key, value in config_overrides.items(): - config.set('config', key, value) + # set config variable overrides + for key, value in config_overrides.items(): + config.set('config', key, value) - env_var_values[f'METPLUS_{loop_by}_BEG'] = ( - f'{loop_by.lower()}_beg = "{run_times[0]}";' - ) + env_var_values[f'METPLUS_{loop_by}_BEG'] = ( + f'{loop_by.lower()}_beg = "{run_times[0]}";' + ) - env_var_values[f'METPLUS_{loop_by}_END'] = ( - f'{loop_by.lower()}_end = "{run_times[-1]}";' - ) + env_var_values[f'METPLUS_{loop_by}_END'] = ( + f'{loop_by.lower()}_end = "{run_times[-1]}";' + ) - env_var_values['METPLUS_MATCH_POINTS'] = ( - 'match_points = TRUE;' - ) + env_var_values['METPLUS_MATCH_POINTS'] = ( + 'match_points = TRUE;' + ) - wrapper = TCPairsWrapper(config) - assert wrapper.isOK - - app_path = os.path.join(config.getdir('MET_BIN_DIR'), wrapper.app_name) - verbosity = f"-v {wrapper.c_dict['VERBOSITY']}" - config_file = wrapper.c_dict.get('CONFIG_FILE') - out_dir = wrapper.c_dict.get('OUTPUT_DIR') - expected_cmds = [(f"{app_path} {verbosity} " - f"-bdeck {bdeck_dir} " - f"-adeck {adeck_dir} " - f"-config {config_file} " - f"-out {out_dir}/tc_pairs"), - ] - - all_cmds = wrapper.run_all_times() - print(f"ALL COMMANDS: {all_cmds}") - assert len(all_cmds) == len(expected_cmds) - - for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): - # check that environment variables were set properly - for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: - match = next((item for item in env_vars if - item.startswith(env_var_key)), None) - assert match is not None - print(f'Checking env var: {env_var_key}') - actual_value = match.split('=', 1)[1] - assert env_var_values.get(env_var_key, '') == actual_value - - # unset 
begin and end for next loop - del env_var_values[f'METPLUS_{loop_by}_BEG'] - del env_var_values[f'METPLUS_{loop_by}_END'] + wrapper = TCPairsWrapper(config) + assert wrapper.isOK + + app_path = os.path.join(config.getdir('MET_BIN_DIR'), wrapper.app_name) + verbosity = f"-v {wrapper.c_dict['VERBOSITY']}" + config_file = wrapper.c_dict.get('CONFIG_FILE') + out_dir = wrapper.c_dict.get('OUTPUT_DIR') + expected_cmds = [(f"{app_path} {verbosity} " + f"-bdeck {bdeck_dir} " + f"-adeck {adeck_dir} " + f"-config {config_file} " + f"-out {out_dir}/tc_pairs"), + ] + + all_cmds = wrapper.run_all_times() + print(f"ALL COMMANDS: {all_cmds}") + assert len(all_cmds) == len(expected_cmds) + + for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds): + # check that environment variables were set properly + for env_var_key in wrapper.WRAPPER_ENV_VAR_KEYS: + match = next((item for item in env_vars if + item.startswith(env_var_key)), None) + assert match is not None + print(f'Checking env var: {env_var_key}') + actual_value = match.split('=', 1)[1] + assert env_var_values.get(env_var_key, '') == actual_value @pytest.mark.wrapper diff --git a/internal/tests/pytests/wrappers/tc_stat/tc_stat_conf.conf b/internal/tests/pytests/wrappers/tc_stat/tc_stat_conf.conf deleted file mode 100755 index ea35a5b14a..0000000000 --- a/internal/tests/pytests/wrappers/tc_stat/tc_stat_conf.conf +++ /dev/null @@ -1,173 +0,0 @@ -# -# PRECONDITION: REQUIRES INSTALLATION OF R on user system -# - -# -# CONFIGURATION -# -[config] -# set looping method to processes-each 'task' in the process list runs to -# completion (for all init times) before the next 'task' is run -LOOP_METHOD = processes - -# List of 'tasks' to run -PROCESS_LIST = TcStat - -# The init time begin and end times, increment, and last init hour. -INIT_BEG = 20150301 -INIT_END = 20150304 -# This is the step-size. 
Increment in seconds from the begin time to the end time -INIT_INCREMENT = 21600 ;; set to every 6 hours=21600 seconds - -# This is the last hour in your initialization time that you want to include in your time window -#INIT_HOUR_END = 18 - -# A list of times to include, in format YYYYMMDD_hh -#INIT_INCLUDE = - -# A list of times to exclude, in format YYYYMMDD_hh -#INIT_EXCLUDE = - -# -# Specify model valid time window in format YYYYMM[DD[_hh]]. Only tracks that fall within the valid time window will -# be used. -# -#VALID_BEG = -#VALID_END = - -# Run tc_stat using a config file or as command line -# if running via MET tc_stat config file, set to CONFIG. Leave blank or -# anything other than CONFIG if running via command line. -TC_STAT_CONFIG_FILE = {PARM_BASE}/met_config/TCStatConfig_wrapped - - -# !!!!!!!IMPORTANT!!!!!! -# Please refer to the README_TC located in ${MET_INSTALL_DIR}/share/met/config -# for details on setting up your analysis jobs. - -# For arithmetic expressions such as: -# -column 'ABS(AMSLP-BMSLP)', enclose the expression in ''. Notice that there are no -# whitespaces within the arithmetic expression. White spaces are to be used to -# separate options from values (e.g. -job summary -by AMODEL,LEAD,AMSLP -init_hour 00 -column 'AMSLP-BMSLP'). -# eg. -lookin {OUTPUT_BASE}/tc_pairs -job filter -dump_row {OUTPUT_BASE}/tc_stat_filter.out -basin ML -init_hr 00 -# or -lookin {OUTPUT_BASE}/tc_pairs -job summary -by AMODEL,LEAD -column AMSLP -column AMAX_WIND -column 'ABS(AMAX_WIND-BMAX_WIND)' -out {OUTPUT_BASE}/tc_stat/tc_stat_summary.tcst - -# Only if TC_STAT_RUN_VIA = CLI -# TC_STAT_CMD_LINE_JOB = -job filter -dump_row {OUTPUT_BASE}/tc_stat/tc_stat_filter.out -basin ML -init_hour 00 - -#TC_STAT_RUN_VIA=COMMAND so no need to define this, but you MUST define -# TC_STAT_JOBS_LIST - -# -# FILL in the following values if running multiple jobs which -# requires a MET tc_stat config file. 
-# -# These all map to the options in the default TC-Stat config file, except these -# are pre-pended with TC_STAT to avoid clashing with any other similarly -# named options from other MET tools (eg TC_STAT_AMODEL corresponds to the -# amodel option in the default MET tc-stat config file, whereas AMODEL -# corresponds to the amodel option in the MET tc-pairs config file). - -# Stratify by these columns: -TC_STAT_AMODEL = -TC_STAT_BMODEL = -TC_STAT_DESC = -TC_STAT_STORM_ID = -TC_STAT_BASIN = -TC_STAT_CYCLONE = -TC_STAT_STORM_NAME = - -# Stratify by init times via a comma-separate list of init times to -# include or exclude. Time format defined as YYYYMMDD_HH or YYYYMMDD_HHmmss -TC_STAT_INIT_BEG = 20170705 -TC_STAT_INIT_END = 20170901 -TC_STAT_INIT_INCLUDE = -TC_STAT_INIT_EXCLUDE = -TC_STAT_INIT_HOUR = 00 -TC_STAT_INIT_MASK = -TC_STAT_VALID_MASK = -TC_STAT_VALID_BEG = -TC_STAT_VALID_END = -TC_STAT_VALID_INCLUDE = -TC_STAT_VALID_EXCLUDE = -TC_STAT_LEAD_REQ = - -# Stratify by the valid time and lead time via comma-separated list of -# times in format HH[MMSS] -TC_STAT_VALID_HOUR = -TC_STAT_LEAD = - -# Stratify over the watch_warn column in the tcst file. Setting this to -# 'ALL' will match HUWARN, HUWATCH, TSWARN, TSWATCH -TC_STAT_TRACK_WATCH_WARN = - -# Stratify by applying thresholds to numeric data columns. Specify with -# comma-separated list of column names and thresholds to be applied. -# The length of TC_STAT_COLUMN_THRESH_NAME should be the same as -# TC_STAT_COLUMN_THRESH_VAL. -TC_STAT_COLUMN_THRESH_NAME = -TC_STAT_COLUMN_THRESH_VAL = - -# Stratify by a list of comma-separated columns names and values corresponding -# to non-numeric data columns of the values of interest. -TC_STAT_COLUMN_STR_NAME = -TC_STAT_COLUMN_STR_VAL = - -# Stratify by applying thresholds to numeric data columns only when lead=0. -# If lead=0 and the value does not meet the threshold, discard the entire -# track. 
The length of TC_STAT_INIT_THRESH_NAME must equal the length of -# TC_STAT_INIT_THRESH_VAL. -TC_STAT_INIT_THRESH_NAME = -TC_STAT_INIT_THRESH_VAL = - -# Stratify by applying thresholds to numeric data columns only when lead = 0. -# If lead = 0 but the value doesn't meet the threshold, discard the entire -# track. -TC_STAT_INIT_STR_NAME = -TC_STAT_INIT_STR_VAL = - -# Excludes any points where distance to land is <=0. When set to TRUE, once land -# is encountered, the remainder of the forecast track is NOT used for the -# verification, even if the track moves back over water. -TC_STAT_WATER_ONLY = false - -# TRUE or FALSE. To specify whether only those track points occurring near -# landfall should be retained. Landfall is the last bmodel track point before -# the distance to land switches from water to land. -TC_STAT_LANDFALL = false - - -# Define the landfall retention window, which is defined as the hours offset -# from the time of landfall. Format is in HH[MMSS]. Default TC_STAT_LANDFALL_BEG -# is set to -24, and TC_STAT_LANDFALL_END is set to 00 -TC_STAT_LANDFALL_BEG = -24 -TC_STAT_LANDFALL_END = 00 - -# Specify whether only those track points common to both the ADECK and BDECK -# tracks should be written out -TC_STAT_MATCH_POINTS = false - -# IMPORTANT Refer to the README_TC for details on setting up analysis -# jobs (located in {MET_INSTALL_DIR}/share/met/config - -# Separate each option and value with whitespace, and each job with a whitespace. -# No whitespace within arithmetic expressions or lists of items -# (e.g. 
-by AMSLP,AMODEL,LEAD -column '(AMAX_WIND-BMAX_WIND)') -# Enclose your arithmetic expressions with '' and separate each job -# by whitespace: -# -job filter -dump_row /path/to, -job summary -line_type TCMPR -column 'ABS(AMAX_WIND-BMAX_WIND)' -out {OUTPUT_BASE}/tc_stat/file.tcst - -TC_STAT_JOB_ARGS = -job summary -line_type TCMPR -column 'ABS(AMAX_WIND-BMAX_WIND)' -dump_row {OUTPUT_BASE}/tc_stat/tc_stat_summary.tcst - - -# -# DIRECTORIES -# -[dir] - -# TC-Stat input data (uses output from tc-pairs) -TC_STAT_LOOKIN_DIR = {INPUT_BASE}/met_test/tc_pairs - -# TC-Stat output data (creates .tcst ASCII files which can be read or used as -# input to TCMPR_Plotter_wrapper (the Python wrapper to plot_tcmpr.R) to create plots. -TC_STAT_OUTPUT_DIR = {OUTPUT_BASE}/tc_stat diff --git a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py index a04736b361..ac7001898a 100644 --- a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py +++ b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py @@ -10,14 +10,29 @@ from metplus.util import ti_calculate -def get_config(metplus_config_files): - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), - 'tc_stat_conf.conf')) - return metplus_config_files(extra_configs) - - -def tc_stat_wrapper(metplus_config_files): +def get_config(metplus_config): + # extra_configs = [] + # extra_configs.append(os.path.join(os.path.dirname(__file__), + # 'tc_stat_conf.conf')) + config = metplus_config + config.set('config', 'PROCESS_LIST', 'TCStat') + config.set('config', 'INIT_BEG', '20150301') + config.set('config', 'INIT_END', '20150304') + config.set('config', 'INIT_INCREMENT', '21600') + config.set('config', 'TC_STAT_INIT_BEG', '20170705') + config.set('config', 'TC_STAT_INIT_END', '20170901') + config.set('config', 'TC_STAT_INIT_HOUR', '00') + config.set('config', 'TC_STAT_JOB_ARGS', + ("-job summary -line_type TCMPR -column " 
+ "'ABS(AMAX_WIND-BMAX_WIND)' " + "-dump_row {OUTPUT_BASE}/tc_stat/tc_stat_summary.tcst")) + config.set('config', 'TC_STAT_LOOKIN_DIR', + '{INPUT_BASE}/met_test/tc_pairs') + config.set('config', 'TC_STAT_OUTPUT_DIR', '{OUTPUT_BASE}/tc_stat') + return config + + +def tc_stat_wrapper(metplus_config): """! Returns a default TCStatWrapper with /path/to entries in the metplus_system.conf and metplus_runtime.conf configuration files. Subsequent tests can customize the final METplus configuration @@ -25,7 +40,7 @@ def tc_stat_wrapper(metplus_config_files): # Default, empty TcStatWrapper with some configuration values set # to /path/to: - config = get_config(metplus_config_files) + config = get_config(metplus_config) return TCStatWrapper(config) @@ -110,8 +125,8 @@ def tc_stat_wrapper(metplus_config_files): ] ) @pytest.mark.wrapper -def test_override_config_in_c_dict(metplus_config_files, overrides, c_dict): - config = get_config(metplus_config_files) +def test_override_config_in_c_dict(metplus_config, overrides, c_dict): + config = get_config(metplus_config) instance = 'tc_stat_overrides' if not config.has_section(instance): config.add_section(instance) @@ -148,13 +163,13 @@ def test_override_config_in_c_dict(metplus_config_files, overrides, c_dict): ] ) @pytest.mark.wrapper -def test_handle_jobs(metplus_config_files, jobs, init_dt, expected_output): +def test_handle_jobs(metplus_config, jobs, init_dt, expected_output): if init_dt: time_info = ti_calculate({'init': init_dt}) else: time_info = None - wrapper = tc_stat_wrapper(metplus_config_files) + wrapper = tc_stat_wrapper(metplus_config) output_base = wrapper.config.getdir('OUTPUT_BASE') output_dir = os.path.join(output_base, 'test_handle_jobs') @@ -223,7 +238,7 @@ def cleanup_test_dirs(parent_dirs, output_dir): ] ) @pytest.mark.wrapper -def test_handle_jobs_create_parent_dir(metplus_config_files, jobs, init_dt, +def test_handle_jobs_create_parent_dir(metplus_config, jobs, init_dt, expected_output, parent_dirs): # if 
init time is provided, calculate other time dict items if init_dt: @@ -231,7 +246,7 @@ def test_handle_jobs_create_parent_dir(metplus_config_files, jobs, init_dt, else: time_info = None - wrapper = tc_stat_wrapper(metplus_config_files) + wrapper = tc_stat_wrapper(metplus_config) # create directory path relative to OUTPUT_BASE to test that function # creates parent directories properly From 46a8d64a2b74a4150aced46b3d8a123a419f2914 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 25 Oct 2022 13:07:52 -0600 Subject: [PATCH 92/92] simplified logic --- internal/tests/pytests/conftest.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index 1f431b9b79..8056e4cfe4 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -108,9 +108,7 @@ def read_configs(extra_configs): # Read in minimum pytest config file and any other extra configs script_dir = os.path.dirname(__file__) minimum_conf = os.path.join(script_dir, 'minimum_pytest.conf') - args = [] - for extra_config in extra_configs: - args.append(extra_config) + args = extra_configs.copy() args.append(minimum_conf) config = config_metplus.setup(args) return config