From b0325e0157598702cbba6c3cc09af0120881e2b4 Mon Sep 17 00:00:00 2001 From: "Henry R. Winterbottom" <49202169+HenryWinterbottom-NOAA@users.noreply.github.com> Date: Wed, 7 Feb 2024 00:40:20 -0700 Subject: [PATCH 01/16] Removes files module loading file no longer used by the GW (#2281) Removes `module-setup.csh.inc` and `module-setup.sh.inc`. The module `ush/module-setup.sh` is updated such that it now sources `ush/detect_machine.sh` to determine which supported platform the global-workflow is being execute on. Resolves #2130 --- modulefiles/module-setup.csh.inc | 87 ------------------------ modulefiles/module-setup.sh.inc | 110 ------------------------------- ush/load_fv3gfs_modules.sh | 2 +- ush/load_ufsda_modules.sh | 2 +- ush/module-setup.sh | 2 + 5 files changed, 4 insertions(+), 199 deletions(-) delete mode 100644 modulefiles/module-setup.csh.inc delete mode 100644 modulefiles/module-setup.sh.inc diff --git a/modulefiles/module-setup.csh.inc b/modulefiles/module-setup.csh.inc deleted file mode 100644 index 7086326627..0000000000 --- a/modulefiles/module-setup.csh.inc +++ /dev/null @@ -1,87 +0,0 @@ -set __ms_shell=csh - -eval "if ( -d / ) set __ms_shell=tcsh" - -if ( { test -d /lfs/f1 } ) then - # We are on NOAA Cactus or Dogwood - if ( ! { module help >& /dev/null } ) then - source /usr/share/lmod/lmod/init/$__ms_shell - fi - module reset -else if ( { test -d /lfs3 } ) then - if ( ! { module help >& /dev/null } ) then - source /apps/lmod/lmod/init/$__ms_shell - endif - module purge -else if ( { test -d /scratch1 } ) then - # We are on NOAA Hera - if ( ! { module help >& /dev/null } ) then - source /apps/lmod/lmod/init/$__ms_shell - endif - module purge -elif [[ -d /work ]] ; then - # We are on MSU Orion or Hercules - if [[ -d /apps/other ]] ; then - # Hercules - init_path="/apps/other/lmod/lmod/init/$__ms_shell" - else - # Orion - init_path="/apps/lmod/lmod/init/$__ms_shell" - fi - if ( ! eval module help > /dev/null 2>&1 ) ; then - source "${init_path}" - fi - module purge -else if ( { test -d /data/prod } ) then - # We are on SSEC S4 - if ( ! { module help >& /dev/null } ) then - source /usr/share/lmod/lmod/init/$__ms_shell - endif - source /etc/profile - module purge -else if ( { test -d /glade } ) then - # We are on NCAR Yellowstone - if ( ! { module help >& /dev/null } ) then - source /usr/share/Modules/init/$__ms_shell - endif - module purge -else if ( { test -d /lustre -a -d /ncrc } ) then - # We are on GAEA. - if ( ! { module help >& /dev/null } ) then - # We cannot simply load the module command. The GAEA - # /etc/csh.login modifies a number of module-related variables - # before loading the module command. Without those variables, - # the module command fails. Hence we actually have to source - # /etc/csh.login here. 
- source /etc/csh.login - set __ms_source_etc_csh_login=yes - else - set __ms_source_etc_csh_login=no - endif - module purge - unsetenv _LMFILES_ - unsetenv _LMFILES_000 - unsetenv _LMFILES_001 - unsetenv LOADEDMODULES - module load modules - if ( { test -d /opt/cray/ari/modulefiles } ) then - module use -a /opt/cray/ari/modulefiles - endif - if ( { test -d /opt/cray/pe/ari/modulefiles } ) then - module use -a /opt/cray/pe/ari/modulefiles - endif - if ( { test -d /opt/cray/pe/craype/default/modulefiles } ) then - module use -a /opt/cray/pe/craype/default/modulefiles - endif - setenv NCEPLIBS /lustre/f1/pdata/ncep_shared/NCEPLIBS/lib - if ( { test -d /lustre/f1/pdata/ncep_shared/NCEPLIBS/lib } ) then - module use $NCEPLIBS/modulefiles - endif - if ( "$__ms_source_etc_csh_login" == yes ) then - source /etc/csh.login - unset __ms_source_etc_csh_login - endif -else - # Workaround for csh limitation. Use sh to print to stderr. - sh -c 'echo WARNING: UNKNOWN PLATFORM 1>&2' -endif diff --git a/modulefiles/module-setup.sh.inc b/modulefiles/module-setup.sh.inc deleted file mode 100644 index db9dabffe1..0000000000 --- a/modulefiles/module-setup.sh.inc +++ /dev/null @@ -1,110 +0,0 @@ -# Create a test function for sh vs. bash detection. The name is -# randomly generated to reduce the chances of name collision. -__ms_function_name="setup__test_function__$$" -eval "$__ms_function_name() { /bin/true ; }" - -# Determine which shell we are using -__ms_ksh_test=$( eval '__text="text" ; if [[ $__text =~ ^(t).* ]] ; then printf "%s" ${.sh.match[1]} ; fi' 2> /dev/null | cat ) -__ms_bash_test=$( eval 'if ( set | grep '$__ms_function_name' | grep -v name > /dev/null 2>&1 ) ; then echo t ; fi ' 2> /dev/null | cat ) - -if [[ ! -z "$__ms_ksh_test" ]] ; then - __ms_shell=ksh -elif [[ ! -z "$__ms_bash_test" ]] ; then - __ms_shell=bash -else - # Not bash or ksh, so assume sh. - __ms_shell=sh -fi - -if [[ -d /lfs/f1 ]] ; then - # We are on NOAA Cactus or Dogwood - if ( ! eval module help > /dev/null 2>&1 ) ; then - source /usr/share/lmod/lmod/init/$__ms_shell - fi - module reset -elif [[ -d /mnt/lfs1 ]] ; then - # We are on NOAA Jet - if ( ! eval module help > /dev/null 2>&1 ) ; then - source /apps/lmod/lmod/init/$__ms_shell - fi - module purge -elif [[ -d /scratch1 ]] ; then - # We are on NOAA Hera - if ( ! eval module help > /dev/null 2>&1 ) ; then - source /apps/lmod/lmod/init/$__ms_shell - fi - module purge -elif [[ -d /work ]] ; then - # We are on MSU Orion or Hercules - if [[ -d /apps/other ]] ; then - # Hercules - init_path="/apps/other/lmod/lmod/init/$__ms_shell" - else - # Orion - init_path="/apps/lmod/lmod/init/$__ms_shell" - fi - if ( ! eval module help > /dev/null 2>&1 ) ; then - source "${init_path}" - fi - module purge -elif [[ -d /glade ]] ; then - # We are on NCAR Yellowstone - if ( ! eval module help > /dev/null 2>&1 ) ; then - . /usr/share/Modules/init/$__ms_shell - fi - module purge -elif [[ -d /lustre && -d /ncrc ]] ; then - # We are on GAEA. - if ( ! eval module help > /dev/null 2>&1 ) ; then - # We cannot simply load the module command. The GAEA - # /etc/profile modifies a number of module-related variables - # before loading the module command. Without those variables, - # the module command fails. Hence we actually have to source - # /etc/profile here. 
- source /etc/profile - __ms_source_etc_profile=yes - else - __ms_source_etc_profile=no - fi - module purge - # clean up after purge - unset _LMFILES_ - unset _LMFILES_000 - unset _LMFILES_001 - unset LOADEDMODULES - module load modules - if [[ -d /opt/cray/ari/modulefiles ]] ; then - module use -a /opt/cray/ari/modulefiles - fi - if [[ -d /opt/cray/pe/ari/modulefiles ]] ; then - module use -a /opt/cray/pe/ari/modulefiles - fi - if [[ -d /opt/cray/pe/craype/default/modulefiles ]] ; then - module use -a /opt/cray/pe/craype/default/modulefiles - fi - if [[ -s /etc/opt/cray/pe/admin-pe/site-config ]] ; then - source /etc/opt/cray/pe/admin-pe/site-config - fi - export NCEPLIBS=/lustre/f1/pdata/ncep_shared/NCEPLIBS/lib - if [[ -d "$NCEPLIBS" ]] ; then - module use $NCEPLIBS/modulefiles - fi - if [[ "$__ms_source_etc_profile" == yes ]] ; then - source /etc/profile - unset __ms_source_etc_profile - fi -elif [[ -d /data/prod ]] ; then - # We are on SSEC's S4 - if ( ! eval module help > /dev/null 2>&1 ) ; then - source /usr/share/lmod/lmod/init/$__ms_shell - fi - module purge -else - echo WARNING: UNKNOWN PLATFORM 1>&2 -fi - -unset __ms_shell -unset __ms_ksh_test -unset __ms_bash_test -unset $__ms_function_name -unset __ms_function_name diff --git a/ush/load_fv3gfs_modules.sh b/ush/load_fv3gfs_modules.sh index b4f23fa331..48885c62e4 100755 --- a/ush/load_fv3gfs_modules.sh +++ b/ush/load_fv3gfs_modules.sh @@ -10,7 +10,7 @@ fi ulimit_s=$( ulimit -S -s ) # Find module command and purge: -source "${HOMEgfs}/modulefiles/module-setup.sh.inc" +source "${HOMEgfs}/ush/module-setup.sh" # Source versions file for runtime source "${HOMEgfs}/versions/run.ver" diff --git a/ush/load_ufsda_modules.sh b/ush/load_ufsda_modules.sh index 1ab730c30e..f15ae5666c 100755 --- a/ush/load_ufsda_modules.sh +++ b/ush/load_ufsda_modules.sh @@ -27,7 +27,7 @@ fi ulimit_s=$( ulimit -S -s ) # Find module command and purge: -source "${HOMEgfs}/modulefiles/module-setup.sh.inc" +source "${HOMEgfs}/ush/module-setup.sh" # Load our modules: module use "${HOMEgfs}/sorc/gdas.cd/modulefiles" diff --git a/ush/module-setup.sh b/ush/module-setup.sh index fd656966bf..e204bae8a2 100755 --- a/ush/module-setup.sh +++ b/ush/module-setup.sh @@ -1,6 +1,8 @@ #!/bin/bash set -u +source "${HOMEgfs}/ush/detect_machine.sh" + if [[ ${MACHINE_ID} = jet* ]] ; then # We are on NOAA Jet if ( ! eval module help > /dev/null 2>&1 ) ; then From 801058ffb0cbbfe101fd5b686aed79c5bf7538c1 Mon Sep 17 00:00:00 2001 From: "Henry R. Winterbottom" <49202169+HenryWinterbottom-NOAA@users.noreply.github.com> Date: Wed, 7 Feb 2024 00:41:59 -0700 Subject: [PATCH 02/16] Consolidate `npe_node_max` (#2289) - The environment variable `npe_node_max` is removed from all files beneath `global-workflow/env`; - The environment variable `npe_node_max` is removed from `parm/config/gefs/config.ufs` and `parm/config/gfs/config.ufs`; - The environment variable `npe_node_max` is maintained only within `parm/config/gefs/config.resources` and `parm/config/gfs/config.resources`. 
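With `npe_node_max` now defined in one place, downstream scripts source it from `config.resources` instead of reading a hard-coded value from an `env` file. A minimal sketch of the resulting pattern, reusing the `prep` step and the thread derivation that already appear in the diff below:

```bash
# Sketch: npe_node_max is set once per machine in config.resources and
# consumed wherever per-task thread counts are derived.
# Assumes EXPDIR points at the experiment directory, as in the workflow configs.
source "${EXPDIR}/config.resources" prep   # defines npe_node_max, npe_node_prep, ...
nth_max=$((npe_node_max / npe_node_prep))  # same derivation the env files already use
```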
Resolves #2133 --- env/AWSPW.env | 1 - env/CONTAINER.env | 1 - env/HERA.env | 1 - env/HERCULES.env | 1 - env/JET.env | 7 ------ env/ORION.env | 1 - env/S4.env | 6 ----- env/WCOSS2.env | 2 -- parm/config/gefs/config.ufs | 48 ------------------------------------- parm/config/gfs/config.ufs | 48 ------------------------------------- 10 files changed, 116 deletions(-) diff --git a/env/AWSPW.env b/env/AWSPW.env index 894cce2343..ea5002ecb9 100755 --- a/env/AWSPW.env +++ b/env/AWSPW.env @@ -14,7 +14,6 @@ fi step=$1 -export npe_node_max=36 export launcher="mpiexec.hydra" export mpmd_opt="" diff --git a/env/CONTAINER.env b/env/CONTAINER.env index bfeb6dd6da..b1f55a4c98 100755 --- a/env/CONTAINER.env +++ b/env/CONTAINER.env @@ -14,7 +14,6 @@ fi step=$1 -export npe_node_max=40 export launcher="mpirun" export mpmd_opt="--multi-prog" diff --git a/env/HERA.env b/env/HERA.env index fb156645f8..e02c0aad22 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -14,7 +14,6 @@ fi step=$1 -export npe_node_max=40 export launcher="srun -l --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" diff --git a/env/HERCULES.env b/env/HERCULES.env index 6a4aad7a7d..ebfa51398b 100755 --- a/env/HERCULES.env +++ b/env/HERCULES.env @@ -12,7 +12,6 @@ fi step=$1 -export npe_node_max=80 export launcher="srun -l --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" diff --git a/env/JET.env b/env/JET.env index 7bb152c5f3..eada0b1c70 100755 --- a/env/JET.env +++ b/env/JET.env @@ -14,13 +14,6 @@ fi step=$1 -if [[ "${PARTITION_BATCH}" = "xjet" ]]; then - export npe_node_max=24 -elif [[ "${PARTITION_BATCH}" = "vjet" ]]; then - export npe_node_max=16 -elif [[ "${PARTITION_BATCH}" = "kjet" ]]; then - export npe_node_max=40 -fi export launcher="srun -l --epilog=/apps/local/bin/report-mem --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" diff --git a/env/ORION.env b/env/ORION.env index d91fd4db03..c5e94cc559 100755 --- a/env/ORION.env +++ b/env/ORION.env @@ -14,7 +14,6 @@ fi step=$1 -export npe_node_max=40 export launcher="srun -l --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" diff --git a/env/S4.env b/env/S4.env index 3dab3fc3e7..b103e865d3 100755 --- a/env/S4.env +++ b/env/S4.env @@ -13,13 +13,7 @@ if [[ $# -ne 1 ]]; then fi step=$1 -PARTITION_BATCH=${PARTITION_BATCH:-"s4"} -if [[ ${PARTITION_BATCH} = "s4" ]]; then - export npe_node_max=32 -elif [[ ${PARTITION_BATCH} = "ivy" ]]; then - export npe_node_max=20 -fi export launcher="srun -l --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" diff --git a/env/WCOSS2.env b/env/WCOSS2.env index a4fe81060d..307ad71c43 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -18,8 +18,6 @@ step=$1 export launcher="mpiexec -l" export mpmd_opt="--cpu-bind verbose,core cfp" -export npe_node_max=128 - if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then nth_max=$((npe_node_max / npe_node_prep)) diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs index 2031d0b538..866de52964 100644 --- a/parm/config/gefs/config.ufs +++ b/parm/config/gefs/config.ufs @@ -68,54 +68,6 @@ if [[ "${skip_mom6}" == "false" ]] || [[ "${skip_cice6}" == "false" ]] || [[ "${ skip_mediator=false fi -case "${machine}" in - "WCOSS2") - npe_node_max=128 - ;; - "HERA" | "ORION" ) - npe_node_max=40 - ;; - "HERCULES" ) - npe_node_max=80 - ;; - "JET") - case "${PARTITION_BATCH}" in - "xjet") - npe_node_max=24 - ;; - "vjet" | "sjet") - npe_node_max=16 - ;; - "kjet") - npe_node_max=40 - ;; - *) - echo "FATAL ERROR: Unsupported 
${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!" - exit 1 - ;; - esac - ;; - "S4") - case "${PARTITION_BATCH}" in - "s4") - npe_node_max=32 - ;; - "ivy") - npe_node_max=20 - ;; - *) - echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!" - exit 1 - ;; - esac - ;; - *) - echo "FATAL ERROR: Unrecognized machine ${machine}" - exit 14 - ;; -esac -export npe_node_max - # (Standard) Model resolution dependent variables case "${fv3_res}" in "C48") diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index 0a59da47ca..c8ce216899 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -68,54 +68,6 @@ if [[ "${skip_mom6}" == "false" ]] || [[ "${skip_cice6}" == "false" ]] || [[ "${ skip_mediator=false fi -case "${machine}" in - "WCOSS2") - npe_node_max=128 - ;; - "HERA" | "ORION" ) - npe_node_max=40 - ;; - "HERCULES" ) - npe_node_max=80 - ;; - "JET") - case "${PARTITION_BATCH}" in - "xjet") - npe_node_max=24 - ;; - "vjet" | "sjet") - npe_node_max=16 - ;; - "kjet") - npe_node_max=40 - ;; - *) - echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!" - exit 1 - ;; - esac - ;; - "S4") - case "${PARTITION_BATCH}" in - "s4") - npe_node_max=32 - ;; - "ivy") - npe_node_max=20 - ;; - *) - echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!" - exit 1 - ;; - esac - ;; - *) - echo "FATAL ERROR: Unrecognized machine ${machine}" - exit 14 - ;; -esac -export npe_node_max - # (Standard) Model resolution dependent variables case "${fv3_res}" in "C48") From f56352874d6dc133a4f1181f77c8f91ca38a6416 Mon Sep 17 00:00:00 2001 From: Kate Friedman Date: Wed, 7 Feb 2024 15:09:12 -0500 Subject: [PATCH 03/16] Update JGLOBAL_FORECAST for octal error (#2295) Add "10#" to ENSMEM value > 0 check to handle octal errors. --- jobs/JGLOBAL_FORECAST | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jobs/JGLOBAL_FORECAST b/jobs/JGLOBAL_FORECAST index 4e2f49ca37..bfdc7e3688 100755 --- a/jobs/JGLOBAL_FORECAST +++ b/jobs/JGLOBAL_FORECAST @@ -1,7 +1,7 @@ #! /usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" -if (( ${ENSMEM:-0} > 0 )); then +if (( 10#${ENSMEM:-0} > 0 )); then source "${HOMEgfs}/ush/jjob_header.sh" -e "efcs" -c "base fcst efcs" else source "${HOMEgfs}/ush/jjob_header.sh" -e "fcst" -c "base fcst" From 43429e23c12c1f2050b3a3f356abdec98dc73ea0 Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Thu, 8 Feb 2024 15:30:28 -0500 Subject: [PATCH 04/16] Enable AO WCDA test (#1963) This PR: - adds GSI + SOCA C48 5-deg ocean 3DVar test (courtesy @guillaumevernieres) - adds a toggle to optionally disable ocnanalvrfy job. 
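A hedged usage sketch for the new toggle (the variable and its `NO` default come from the `config.base.emc.dyn` change below; everything else is illustrative):

```bash
# Sketch: SOCA ocean DA verification is now opt-in.
# DO_VRFY_OCEANDA defaults to "NO" in config.base; setting it to "YES" in the
# experiment configuration adds the ocnanalvrfy task back to the cycled suite.
export DO_VRFY_OCEANDA="YES"
```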
--- ci/cases/pr/C48mx500_3DVarAOWCDA.yaml | 22 ++++++++++++++++++++++ ci/cases/yamls/soca_gfs_defaults_ci.yaml | 5 +++++ env/HERA.env | 18 +++--------------- jobs/JGLOBAL_PREP_OCEAN_OBS | 4 ++-- parm/config/gfs/config.base.emc.dyn | 1 + parm/config/gfs/config.ocn | 10 +++++++++- parm/config/gfs/config.resources | 17 ++++++++++++----- parm/config/gfs/yaml/defaults.yaml | 13 +++++++------ workflow/applications/gfs_cycled.py | 11 +++++++---- workflow/setup_expt.py | 7 +++++++ 10 files changed, 75 insertions(+), 33 deletions(-) create mode 100644 ci/cases/pr/C48mx500_3DVarAOWCDA.yaml create mode 100644 ci/cases/yamls/soca_gfs_defaults_ci.yaml diff --git a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml new file mode 100644 index 0000000000..b972d3a445 --- /dev/null +++ b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml @@ -0,0 +1,22 @@ +experiment: + system: gfs + mode: cycled + +arguments: + pslot: {{ 'pslot' | getenv }} + app: S2S + resdetatmos: 48 + resdetocean: 5.0 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT + expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR + icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C48mx500 + idate: 2021032412 + edate: 2021032418 + nens: 0 + gfs_cyc: 0 + start: warm + yaml: {{ HOMEgfs }}/ci/cases/yamls/soca_gfs_defaults_ci.yaml + +skip_ci_on_hosts: + - orion + - hercules diff --git a/ci/cases/yamls/soca_gfs_defaults_ci.yaml b/ci/cases/yamls/soca_gfs_defaults_ci.yaml new file mode 100644 index 0000000000..126637cd86 --- /dev/null +++ b/ci/cases/yamls/soca_gfs_defaults_ci.yaml @@ -0,0 +1,5 @@ +defaults: + !INC {{ HOMEgfs }}/parm/config/gfs/yaml/defaults.yaml +base: + ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }} + DO_JEDIOCNVAR: "YES" diff --git a/env/HERA.env b/env/HERA.env index e02c0aad22..057a2313f8 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -93,31 +93,19 @@ elif [[ "${step}" = "ocnanalbmat" ]]; then export APRUNCFP="${launcher} -n \$ncmd --multi-prog" - nth_max=$((npe_node_max / npe_node_ocnanalbmat)) - - export NTHREADS_OCNANAL=${nth_ocnanalbmat:-${nth_max}} - [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} - export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat} --cpus-per-task=${NTHREADS_OCNANAL}" + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat}" elif [[ "${step}" = "ocnanalrun" ]]; then export APRUNCFP="${launcher} -n \$ncmd --multi-prog" - nth_max=$((npe_node_max / npe_node_ocnanalrun)) - - export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}} - [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} - export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun} --cpus-per-task=${NTHREADS_OCNANAL}" + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}" elif [[ "${step}" = "ocnanalchkpt" ]]; then export APRUNCFP="${launcher} -n \$ncmd --multi-prog" - nth_max=$((npe_node_max / npe_node_ocnanalchkpt)) - - export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}} - [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} - export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}" + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt}" elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then diff --git a/jobs/JGLOBAL_PREP_OCEAN_OBS b/jobs/JGLOBAL_PREP_OCEAN_OBS index a100aca89c..a6fcf9c9b3 100755 --- a/jobs/JGLOBAL_PREP_OCEAN_OBS +++ b/jobs/JGLOBAL_PREP_OCEAN_OBS @@ -15,7 +15,7 @@ YMD=${PDY} HH=${cyc} generate_com -rx COMOUT_OBS:COM_OBS_TMPL ############################################## # Add prep_marine_obs.py to PYTHONPATH 
-export PYTHONPATH=${HOMEgfs}/sorc/gdas.cd/ush/soca:${PYTHONPATH} +export PYTHONPATH=${HOMEgfs}/sorc/gdas.cd/ush:${PYTHONPATH} ############################################################### # Run relevant script @@ -38,7 +38,7 @@ if [[ -e "${pgmout}" ]] ; then fi ########################################## -# Handle the temporary working directory +# Handle the temporary working directory ########################################## cd "${DATAROOT}" || (echo "FATAL ERROR: ${DATAROOT} does not exist. ABORT!"; exit 1) [[ ${KEEPDATA} = "NO" ]] && rm -rf "${DATA}" diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 5260bbd4f6..6e1e8339f0 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -378,6 +378,7 @@ export binary_diag=".false." # Verification options export DO_METP="NO" # Run METPLUS jobs - set METPLUS settings in config.metp; not supported with spack-stack export DO_FIT2OBS="YES" # Run fit to observations package +export DO_VRFY_OCEANDA="NO" # Run SOCA Ocean DA verification tasks # Archiving options export HPSSARCH="@HPSSARCH@" # save data to HPSS archive diff --git a/parm/config/gfs/config.ocn b/parm/config/gfs/config.ocn index f9e6595ce9..317a76e58a 100644 --- a/parm/config/gfs/config.ocn +++ b/parm/config/gfs/config.ocn @@ -16,6 +16,14 @@ if [[ "${DO_JEDIOCNVAR}" == "YES" ]]; then else export ODA_INCUPD="False" fi -export ODA_INCUPD_NHOURS="3.0" # In MOM_input, this is time interval for applying increment + +# Time interval for applying the increment +if [[ "${DOIAU}" == "YES" ]]; then + export ODA_INCUPD_NHOURS="6.0" +else + export ODA_INCUPD_NHOURS="3.0" +fi + + echo "END: config.ocn" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 1dcf757de2..b746a4b32a 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -352,7 +352,7 @@ case ${step} in export npe_prepoceanobs=1 export nth_prepoceanobs=1 export npe_node_prepoceanobs=$(( npe_node_max / nth_prepoceanobs )) - export memory_prepoceanobs="24GB" + export memory_prepoceanobs="48GB" ;; "ocnanalbmat") @@ -406,13 +406,20 @@ case ${step} in export nth_ocnanalchkpt=1 export npe_node_ocnanalchkpt=$(( npe_node_max / nth_ocnanalchkpt )) case ${CASE} in - "C384") memory_ocnanalchkpt="128GB";; - "C96") memory_ocnanalchkpt="32GB";; - "C48") memory_ocnanalchkpt="32GB";; + "C384") + memory_ocnanalchkpt="128GB" + npes=40;; + "C96") + memory_ocnanalchkpt="32GB" + npes=16;; + "C48") + memory_ocnanalchkpt="32GB" + npes=8;; *) echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}" exit 4 esac + export npe_ocnanalchkpt=${npes} export memory_ocnanalchkpt ;; @@ -926,7 +933,7 @@ case ${step} in export nth_eupd=4 fi ;; - *) + *) echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}" exit 4 ;; diff --git a/parm/config/gfs/yaml/defaults.yaml b/parm/config/gfs/yaml/defaults.yaml index 61fc32c126..10af47de07 100644 --- a/parm/config/gfs/yaml/defaults.yaml +++ b/parm/config/gfs/yaml/defaults.yaml @@ -25,13 +25,14 @@ landanl: IO_LAYOUT_Y: 1 ocnanal: - SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space. 
@guillaumevernieres will open an issue - CASE_ANL: "C48" - COMIN_OBS: "/scratch2/NCEPDEV/marineda/r2d2-v2-v3" # TODO: make platform agnostic - SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml" + SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space. + CASE_ANL: "C48" # TODO: Check in gdasapp if used anywhere for SOCA + SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml" # TODO: This is also repeated in oceanprepobs SOCA_NINNER: 100 - R2D2_OBS_SRC: "gdas_marine" - R2D2_OBS_DUMP: "s2s_v1" SABER_BLOCKS_YAML: "" NICAS_RESOL: 1 NICAS_GRID_SIZE: 15000 +prepoceanobs: + SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml" # TODO: This is also repeated in ocnanal + OBSPREP_YAML: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obsprep/obsprep_config.yaml" + DMPDIR: "/scratch1/NCEPDEV/global/glopara/data/experimental_obs" diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 7b5892d7c0..6dd0342a78 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -18,6 +18,7 @@ def __init__(self, conf: Configuration): self.do_jediocnvar = self._base.get('DO_JEDIOCNVAR', False) self.do_jedilandda = self._base.get('DO_JEDILANDDA', False) self.do_mergensst = self._base.get('DO_MERGENSST', False) + self.do_vrfy_oceanda = self._base.get('DO_VRFY_OCEANDA', False) self.lobsdiag_forenkf = False self.eupd_cdumps = None @@ -43,8 +44,9 @@ def _get_app_configs(self): if self.do_jediocnvar: configs += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat', - 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost', - 'ocnanalvrfy'] + 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost'] + if self.do_vrfy_oceanda: + configs += ['ocnanalvrfy'] if self.do_ocean: configs += ['ocnpost'] @@ -137,8 +139,9 @@ def get_task_names(self): if self.do_jediocnvar: gdas_gfs_common_tasks_before_fcst += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat', 'ocnanalrun', - 'ocnanalchkpt', 'ocnanalpost', - 'ocnanalvrfy'] + 'ocnanalchkpt', 'ocnanalpost'] + if self.do_vrfy_oceanda: + gdas_gfs_common_tasks_before_fcst += ['ocnanalvrfy'] gdas_gfs_common_tasks_before_fcst += ['sfcanl', 'analcalc'] diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py index 2bc41854d8..3eeb584f46 100755 --- a/workflow/setup_expt.py +++ b/workflow/setup_expt.py @@ -224,6 +224,13 @@ def link_files_from_src_to_dst(src_dir, dst_dir): src_file = os.path.join(src_dir, fname) if os.path.exists(src_file): os.symlink(src_file, os.path.join(dst_dir, fname)) + # First 1/2 cycle also needs a atmos increment if doing warm start + if inputs.start in ['warm']: + for ftype in ['atmi003.nc', 'atminc.nc', 'atmi009.nc']: + fname = f'{inputs.cdump}.t{idatestr[8:]}z.{ftype}' + src_file = os.path.join(src_dir, fname) + if os.path.exists(src_file): + os.symlink(src_file, os.path.join(dst_dir, fname)) return From 54daa31ce0a3c23d4d74def5e54436a39a899ed4 Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Thu, 8 Feb 2024 15:48:38 -0500 Subject: [PATCH 05/16] Jenkins Declartive Pipeline for CI with gfs/gefs multibuilds (#2246) Adding top level Jenkins file for CI tests running on Jenkins Controller: - Declarative Multi-branch Pipeline (has enhanced restart capabilities on a per section bases) - Starts Pipeline from Label PR same as BASH system (for now) - Progress and restarts can me managed with CAC Login at [EPIC OAR Jenkins](https://jenkins.epic.oarcloud.noaa.gov) - Has logic for multi **gfs/gefs** 
system builds (arguments based on a configuration file `ci/casts/yamls/build.yaml`) - Any number of **systems** may be added by manual adding an ele- ment to the matrix in the Jenkinsfile - _It may be possible to dynamic add matrix values with a specialty plug-in_ - Currently only runs on **Orion** and **Hera** using `mterry` account Resolves #2119 Resolves #2118 --- Jenkinsfile | 188 +++++++++++++++++++++++++++ ci/cases/yamls/build.yaml | 3 + ci/scripts/run-check_ci.sh | 4 +- ci/scripts/utils/ci_utils.sh | 124 ++++++++++++++++-- ci/scripts/utils/ci_utils_wrapper.sh | 9 ++ modulefiles/module_gwsetup.hera.lua | 2 + 6 files changed, 315 insertions(+), 15 deletions(-) create mode 100644 Jenkinsfile create mode 100644 ci/cases/yamls/build.yaml create mode 100755 ci/scripts/utils/ci_utils_wrapper.sh diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000000..c591aae70f --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,188 @@ +def Machine = 'none' +def machine = 'none' +def HOME = 'none' +def localworkspace = 'none' +def commonworkspace = 'none' + +pipeline { + agent { label 'built-in' } + + options { + skipDefaultCheckout() + buildDiscarder(logRotator(numToKeepStr: '2')) + } + + stages { // This initial stage is used to get the Machine name from the GitHub labels on the PR + // which is used to designate the Nodes in the Jenkins Controler by the agent label + // Each Jenknis Node is connected to said machine via an JAVA agent via an ssh tunnel + + stage('Get Machine') { + agent { label 'built-in' } + steps { + script { + localworkspace = env.WORKSPACE + machine = 'none' + for (label in pullRequest.labels) { + echo "Label: ${label}" + if ((label.matches("CI-Hera-Ready"))) { + machine = 'hera' + } else if ((label.matches("CI-Orion-Ready"))) { + machine = 'orion' + } else if ((label.matches("CI-Hercules-Ready"))) { + machine = 'hercules' + } + } // createing a second machine varible with first letter capital + // because the first letter of the machine name is captitalized in the GitHub labels + Machine = machine[0].toUpperCase() + machine.substring(1) + } + } + } + + stage('Get Common Workspace') { + agent { label "${machine}-emc" } + steps { + script { + properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in','Hera-EMC','Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])]) + HOME = "${WORKSPACE}/TESTDIR" + commonworkspace = "${WORKSPACE}" + sh( script: "mkdir -p ${HOME}/RUNTESTS", returnStatus: true) + pullRequest.addLabel("CI-${Machine}-Building") + if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Ready") } ) { + pullRequest.removeLabel("CI-${Machine}-Ready") + } + } + } + } + + stage('Build System') { + matrix { + agent { label "${machine}-emc" } + //options { + // throttle(['global_matrix_build']) + //} + axes { + axis { + name "system" + values "gfs", "gefs" + } + } + stages { + stage("build system") { + steps { + script { + def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to build the system on per system basis under the common workspace HOME + sh( script: "mkdir -p ${HOMEgfs}", returnStatus: true) + ws(HOMEgfs) { + env.MACHINE_ID = machine // MACHINE_ID is used in the build scripts to determine the machine and is added to the shell environment + if (fileExists("${HOMEgfs}/sorc/BUILT_semaphor")) { // if the system is already built, skip the build in the case of re-runs + sh( script: "cat ${HOMEgfs}/sorc/BUILT_semaphor", returnStdout: 
true).trim() // TODO: and user configurable control to manage build semphore + ws(commonworkspace) { pullRequest.comment("Cloned PR already built (or build skipped) on ${machine} in directory ${HOMEgfs}") } + } else { + checkout scm + sh( script: "source workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive", returnStatus: true) + def builds_file = readYaml file: "ci/cases/yamls/build.yaml" + def build_args_list = builds_file['builds'] + def build_args = build_args_list[system].join(" ").trim().replaceAll("null", "") + dir("${HOMEgfs}/sorc") { + sh( script: "${build_args}", returnStatus: true) + sh( script: "./link_workflow.sh", returnStatus: true) + sh( script: "echo ${HOMEgfs} > BUILT_semaphor", returnStatus: true) + } + } + if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Building") } ) { + pullRequest.removeLabel("CI-${Machine}-Building") + } + pullRequest.addLabel("CI-${Machine}-Running") + } + } + } + } + } + } + } + + stage('Run Tests') { + matrix { + agent { label "${machine}-emc" } + axes { + axis { + name "Case" + values "C48_ATM", "C48_S2SWA_gefs", "C48_S2SW", "C96_atm3DVar" // TODO add dynamic list of cases from env vars (needs addtional plugins) + } + } + stages { + stage('Create Experiment') { + steps { + script { + sh( script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp", returnStatus: true) + def yaml_case = readYaml file: "${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp" + system = yaml_case.experiment.system + def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to populate the XML on per system basis + env.RUNTESTS = "${HOME}/RUNTESTS" + sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStatus: true) + } + } + } + stage('Run Experiments') { + steps { + script { + HOMEgfs = "${HOME}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments + ws(HOMEgfs) { + pslot = sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true ).trim() + pullRequest.comment("**Running experiments: ${Case} on ${Machine}**
Built against system **${system}** in directory:
`${HOMEgfs}`
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`") + try { + sh( script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}", returnStatus: true) + } catch (Exception e) { + pullRequest.comment("**FAILURE** running experiments: ${Case} on ${Machine}") + error("Failed to run experiments ${Case} on ${Machine}") + } + pullRequest.comment("**SUCCESS** running experiments: ${Case} on ${Machine}") + } + } + } + post { + always { + script { + ws (HOMEgfs) { + for (label in pullRequest.labels) { + if (label.contains("${Machine}")) { + pullRequest.removeLabel(label) + } + } + } + } + } + success { + script { + ws (HOMEgfs) { + pullRequest.addLabel("CI-${Machine}-Passed") + def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York')) + pullRequest.comment("**CI SUCCESS** ${Machine} at ${timestamp}\n\nBuilt and ran in directory `${HOME}`") + } + } + } + failure { + script { + ws (HOMEgfs) { + pullRequest.addLabel("CI-${Machine}-Failed") + def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York')) + pullRequest.comment("**CI FAILED** ${Machine} at ${timestamp}
Built and ran in directory `${HOME}`") + if (fileExists('${HOME}/RUNTESTS/ci.log')) { + def fileContent = readFile '${HOME}/RUNTESTS/ci.log' + fileContent.eachLine { line -> + if( line.contains(".log")) { + archiveArtifacts artifacts: "${line}", fingerprint: true + } + } + } + } + } + } + } + } + } + } + } + } + +} diff --git a/ci/cases/yamls/build.yaml b/ci/cases/yamls/build.yaml new file mode 100644 index 0000000000..5398fa1889 --- /dev/null +++ b/ci/cases/yamls/build.yaml @@ -0,0 +1,3 @@ +builds: + - gefs: './build_all.sh' + - gfs: './build_all.sh -gu' \ No newline at end of file diff --git a/ci/scripts/run-check_ci.sh b/ci/scripts/run-check_ci.sh index 5a909c1c64..f98f434462 100755 --- a/ci/scripts/run-check_ci.sh +++ b/ci/scripts/run-check_ci.sh @@ -21,7 +21,9 @@ pslot=${2:-${pslot:-?}} # Name of the experiment being tested by this scr # │   └── ${pslot} # └── EXPDIR # └── ${pslot} -HOMEgfs="${TEST_DIR}/HOMEgfs" +# Two system build directories created at build time gfs, and gdas +# TODO: Make this configurable (for now all scripts run from gfs for CI at runtime) +HOMEgfs="${TEST_DIR}/gfs" RUNTESTS="${TEST_DIR}/RUNTESTS" # Source modules and setup logging diff --git a/ci/scripts/utils/ci_utils.sh b/ci/scripts/utils/ci_utils.sh index 737a3e5a86..6f2426c388 100755 --- a/ci/scripts/utils/ci_utils.sh +++ b/ci/scripts/utils/ci_utils.sh @@ -1,24 +1,120 @@ #!/bin/env bash -function cancel_slurm_jobs() { +function determine_scheduler() { + if command -v sbatch &> /dev/null; then + echo "slurm"; + elif command -v qsub &> /dev/null; then + echo "torque"; + else + echo "unknown" + fi +} - # Usage: cancel_slurm_jobs - # Example: cancel_slurm_jobs "C48_ATM_3c4e7f74" +function cancel_batch_jobs() { + # Usage: cancel_batch_jobs + # Example: cancel_batch_jobs "C48_ATM_3c4e7f74" # - # Cancel all Slurm jobs that have the given substring in their name + # Cancel all batch jobs that have the given substring in their name # So like in the example all jobs with "C48_ATM_3c4e7f74" # in their name will be canceled local substring=$1 local job_ids - job_ids=$(squeue -u "${USER}" -h -o "%i") - - for job_id in ${job_ids}; do - job_name=$(sacct -j "${job_id}" --format=JobName%100 | head -3 | tail -1 | sed -r 's/\s+//g') || true - if [[ "${job_name}" =~ ${substring} ]]; then - echo "Canceling Slurm Job ${job_name} with: scancel ${job_id}" - scancel "${job_id}" - continue - fi - done + + scheduler=$(determine_scheduler) + + if [[ "${schduler}" == "torque" ]]; then + job_ids=$(qstat -u "${USER}" | awk '{print $1}') || true + + for job_id in ${job_ids}; do + job_name=$(qstat -f "${job_id}" | grep Job_Name | awk '{print $3}') || true + if [[ "${job_name}" =~ ${substring} ]]; then + echo "Canceling PBS Job ${job_name} with: qdel ${job_id}" + qdel "${job_id}" + continue + fi + done + + elif [[ "${scheduler}" == "slurm" ]]; then + + job_ids=$(squeue -u "${USER}" -h -o "%i") + + for job_id in ${job_ids}; do + job_name=$(sacct -j "${job_id}" --format=JobName%100 | head -3 | tail -1 | sed -r 's/\s+//g') || true + if [[ "${job_name}" =~ ${substring} ]]; then + echo "Canceling Slurm Job ${job_name} with: scancel ${job_id}" + scancel "${job_id}" + continue + fi + done + + else + echo "FATAL: Unknown/unsupported job scheduler" + exit 1 + fi +} + + +function get_pr_case_list () { + + ############################################################# + # loop over every yaml file in the PR's ci/cases + # and create an run directory for each one for this PR loop + ############################################################# + for 
yaml_config in "${HOMEgfs}/ci/cases/pr/"*.yaml; do + case=$(basename "${yaml_config}" .yaml) || true + echo "${case}" + done +} + +function get_pslot_list () { + + local RUNTESTS="${1}" + + ############################################################# + # loop over expdir directories in RUNTESTS + # and create list of the directory names (pslot) with the hash tag + ############################################################# + for pslot_dir in "${RUNTESTS}/EXPDIR/"*; do + pslot=$(basename "${pslot_dir}") || true + echo "${pslot}" + done + +} + +function get_pslot () { + + local RUNTESTS="${1}" + local case="${2}" + + ############################################################# + # loop over expdir directories in RUNTESTS + # and return the name of the pslot with its tag that matches the case + ############################################################# + for pslot_dir in "${RUNTESTS}/EXPDIR/"*; do + pslot=$(basename "${pslot_dir}") + check_case=$(echo "${pslot}" | rev | cut -d"_" -f2- | rev) || true + if [[ "${check_case}" == "${case}" ]]; then + echo "${pslot}" + break + fi + done + +} + +function create_experiment () { + + local yaml_config="${1}" + cd "${HOMEgfs}" || exit 1 + pr_sha=$(git rev-parse --short HEAD) + case=$(basename "${yaml_config}" .yaml) || true + export pslot=${case}_${pr_sha} + + source "${HOMEgfs}/ci/platforms/config.${MACHINE_ID}" + source "${HOMEgfs}/workflow/gw_setup.sh" + + # system=$(grep "system:" "${yaml_config}" | cut -d":" -f2 | tr -d " ") || true + + "${HOMEgfs}/${system}/workflow/create_experiment.py" --overwrite --yaml "${yaml_config}" + } diff --git a/ci/scripts/utils/ci_utils_wrapper.sh b/ci/scripts/utils/ci_utils_wrapper.sh new file mode 100755 index 0000000000..51c392fb99 --- /dev/null +++ b/ci/scripts/utils/ci_utils_wrapper.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +HOMEgfs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." >/dev/null 2>&1 && pwd )" +source "${HOMEgfs}/ush/detect_machine.sh" + +utitilty_function="${1}" + +source "${HOMEgfs}/ci/scripts/utils/ci_utils.sh" +${utitilty_function} "${@:2}" diff --git a/modulefiles/module_gwsetup.hera.lua b/modulefiles/module_gwsetup.hera.lua index c86cac7b02..961403e1a2 100644 --- a/modulefiles/module_gwsetup.hera.lua +++ b/modulefiles/module_gwsetup.hera.lua @@ -14,5 +14,7 @@ load(pathJoin("python", python_ver)) load("py-jinja2") load("py-pyyaml") load("py-numpy") +local git_ver=os.getenv("git_ver") or "2.40.0" +load(pathJoin("git", git_ver)) whatis("Description: GFS run setup environment") From 061992bb6160554430cf688adf6184f01b732098 Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Sat, 10 Feb 2024 01:33:36 -0500 Subject: [PATCH 06/16] Fix Jenkins success reporting (#2302) Moving the post section back outside of main Run Experiments stage. This allows the system to correctly report the **Success** status until after all tests pass. _Had originally moved them in attempts to solve "Not an SCM GitHub Job" issue and cause the reporting to misbehave._ Also ran through Jenkins linter and updated some messaging that was incorrectly reporting system build type. 
--- Jenkinsfile | 154 ++++++++++++++++++++++++++-------------------------- 1 file changed, 77 insertions(+), 77 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index c591aae70f..6c0a0bb71f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -24,15 +24,15 @@ pipeline { machine = 'none' for (label in pullRequest.labels) { echo "Label: ${label}" - if ((label.matches("CI-Hera-Ready"))) { + if ((label.matches('CI-Hera-Ready'))) { machine = 'hera' - } else if ((label.matches("CI-Orion-Ready"))) { + } else if ((label.matches('CI-Orion-Ready'))) { machine = 'orion' - } else if ((label.matches("CI-Hercules-Ready"))) { + } else if ((label.matches('CI-Hercules-Ready'))) { machine = 'hercules' } - } // createing a second machine varible with first letter capital - // because the first letter of the machine name is captitalized in the GitHub labels + } // createing a second machine varible with first letter capital + // because the first letter of the machine name is captitalized in the GitHub labels Machine = machine[0].toUpperCase() + machine.substring(1) } } @@ -42,17 +42,17 @@ pipeline { agent { label "${machine}-emc" } steps { script { - properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in','Hera-EMC','Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])]) + properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in', 'Hera-EMC', 'Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])]) HOME = "${WORKSPACE}/TESTDIR" commonworkspace = "${WORKSPACE}" - sh( script: "mkdir -p ${HOME}/RUNTESTS", returnStatus: true) + sh(script: "mkdir -p ${HOME}/RUNTESTS", returnStatus: true) pullRequest.addLabel("CI-${Machine}-Building") - if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Ready") } ) { + if (pullRequest.labels.any { value -> value.matches("CI-${Machine}-Ready") }) { pullRequest.removeLabel("CI-${Machine}-Ready") - } } } } + } stage('Build System') { matrix { @@ -61,66 +61,66 @@ pipeline { // throttle(['global_matrix_build']) //} axes { - axis { - name "system" - values "gfs", "gefs" + axis { + name 'system' + values 'gfs', 'gefs' } } stages { - stage("build system") { + stage('build system') { steps { script { def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to build the system on per system basis under the common workspace HOME - sh( script: "mkdir -p ${HOMEgfs}", returnStatus: true) + sh(script: "mkdir -p ${HOMEgfs}", returnStatus: true) ws(HOMEgfs) { env.MACHINE_ID = machine // MACHINE_ID is used in the build scripts to determine the machine and is added to the shell environment if (fileExists("${HOMEgfs}/sorc/BUILT_semaphor")) { // if the system is already built, skip the build in the case of re-runs - sh( script: "cat ${HOMEgfs}/sorc/BUILT_semaphor", returnStdout: true).trim() // TODO: and user configurable control to manage build semphore + sh(script: "cat ${HOMEgfs}/sorc/BUILT_semaphor", returnStdout: true).trim() // TODO: and user configurable control to manage build semphore ws(commonworkspace) { pullRequest.comment("Cloned PR already built (or build skipped) on ${machine} in directory ${HOMEgfs}") } } else { checkout scm - sh( script: "source workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive", returnStatus: true) - def builds_file = readYaml file: "ci/cases/yamls/build.yaml" + sh(script: 'source 
workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive', returnStatus: true) + def builds_file = readYaml file: 'ci/cases/yamls/build.yaml' def build_args_list = builds_file['builds'] - def build_args = build_args_list[system].join(" ").trim().replaceAll("null", "") + def build_args = build_args_list[system].join(' ').trim().replaceAll('null', '') dir("${HOMEgfs}/sorc") { - sh( script: "${build_args}", returnStatus: true) - sh( script: "./link_workflow.sh", returnStatus: true) - sh( script: "echo ${HOMEgfs} > BUILT_semaphor", returnStatus: true) + sh(script: "${build_args}", returnStatus: true) + sh(script: './link_workflow.sh', returnStatus: true) + sh(script: "echo ${HOMEgfs} > BUILT_semaphor", returnStatus: true) } } - if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Building") } ) { - pullRequest.removeLabel("CI-${Machine}-Building") - } - pullRequest.addLabel("CI-${Machine}-Running") + if (pullRequest.labels.any { value -> value.matches("CI-${Machine}-Building") }) { + pullRequest.removeLabel("CI-${Machine}-Building") } + pullRequest.addLabel("CI-${Machine}-Running") } } } } } } +} stage('Run Tests') { matrix { agent { label "${machine}-emc" } axes { axis { - name "Case" - values "C48_ATM", "C48_S2SWA_gefs", "C48_S2SW", "C96_atm3DVar" // TODO add dynamic list of cases from env vars (needs addtional plugins) + name 'Case' + values 'C48_ATM', 'C48_S2SWA_gefs', 'C48_S2SW', 'C96_atm3DVar' // TODO add dynamic list of cases from env vars (needs addtional plugins) } } stages { stage('Create Experiment') { - steps { + steps { script { - sh( script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp", returnStatus: true) + sh(script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp", returnStatus: true) def yaml_case = readYaml file: "${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp" system = yaml_case.experiment.system def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to populate the XML on per system basis env.RUNTESTS = "${HOME}/RUNTESTS" - sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStatus: true) - } + sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStatus: true) + } } } stage('Run Experiments') { @@ -128,54 +128,15 @@ pipeline { script { HOMEgfs = "${HOME}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments ws(HOMEgfs) { - pslot = sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true ).trim() - pullRequest.comment("**Running experiments: ${Case} on ${Machine}**
Built against system **${system}** in directory:
`${HOMEgfs}`
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`") - try { - sh( script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}", returnStatus: true) + pslot = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true).trim() + pullRequest.comment("**Running** experiment: ${Case} on ${Machine}
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`") + try { + sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}", returnStatus: true) } catch (Exception e) { - pullRequest.comment("**FAILURE** running experiments: ${Case} on ${Machine}") - error("Failed to run experiments ${Case} on ${Machine}") - } - pullRequest.comment("**SUCCESS** running experiments: ${Case} on ${Machine}") - } - } - } - post { - always { - script { - ws (HOMEgfs) { - for (label in pullRequest.labels) { - if (label.contains("${Machine}")) { - pullRequest.removeLabel(label) - } - } - } - } - } - success { - script { - ws (HOMEgfs) { - pullRequest.addLabel("CI-${Machine}-Passed") - def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York')) - pullRequest.comment("**CI SUCCESS** ${Machine} at ${timestamp}\n\nBuilt and ran in directory `${HOME}`") - } - } - } - failure { - script { - ws (HOMEgfs) { - pullRequest.addLabel("CI-${Machine}-Failed") - def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York')) - pullRequest.comment("**CI FAILED** ${Machine} at ${timestamp}
Built and ran in directory `${HOME}`") - if (fileExists('${HOME}/RUNTESTS/ci.log')) { - def fileContent = readFile '${HOME}/RUNTESTS/ci.log' - fileContent.eachLine { line -> - if( line.contains(".log")) { - archiveArtifacts artifacts: "${line}", fingerprint: true - } - } - } + pullRequest.comment("**FAILURE** running experiment: ${Case} on ${Machine}") + error("Failed to run experiments ${Case} on ${Machine}") } + pullRequest.comment("**SUCCESS** running experiment: ${Case} on ${Machine}") } } } @@ -185,4 +146,43 @@ pipeline { } } + post { + always { + script { + if(env.CHANGE_ID) { + for (label in pullRequest.labels) { + if (label.contains("${Machine}")) { + pullRequest.removeLabel(label) + } + } + } + } + } + success { + script { + if(env.CHANGE_ID) { + pullRequest.addLabel("CI-${Machine}-Passed") + def timestamp = new Date().format('MM dd HH:mm:ss', TimeZone.getTimeZone('America/New_York')) + pullRequest.comment("**CI SUCCESS** ${Machine} at ${timestamp}\n\nBuilt and ran in directory `${HOME}`") + } + } + } + failure { + script { + if(env.CHANGE_ID) { + pullRequest.addLabel("CI-${Machine}-Failed") + def timestamp = new Date().format('MM dd HH:mm:ss', TimeZone.getTimeZone('America/New_York')) + pullRequest.comment("**CI FAILED** ${Machine} at ${timestamp}
Built and ran in directory `${HOME}`") + } + if (fileExists('${HOME}/RUNTESTS/ci.log')) { + def fileContent = readFile '${HOME}/RUNTESTS/ci.log' + fileContent.eachLine { line -> + if (line.contains('.log')) { + archiveArtifacts artifacts: "${line}", fingerprint: true + } + } + } + } + } + } } From bb4ca65fe5524f76e40b97346339f1dda6680ce1 Mon Sep 17 00:00:00 2001 From: CatherineThomas-NOAA <59020064+CatherineThomas-NOAA@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:50:41 +0000 Subject: [PATCH 07/16] Redo v16.3 config.base changes for DA increments (#2304) Include the additional hydrometeors to the INCREMENTS_TO_ZERO and INCVARS_ZERO_STRAT variables in config.base that were modified in v16.3. Resolves: #2303 --- parm/config/gfs/config.base.emc.dyn | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 6e1e8339f0..1f6568c3ee 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -360,13 +360,13 @@ export MAKE_NSSTBUFR="@MAKE_NSSTBUFR@" export MAKE_ACFTBUFR="@MAKE_ACFTBUFR@" # Analysis increments to zero in CALCINCEXEC -export INCREMENTS_TO_ZERO="'liq_wat_inc','icmr_inc'" +export INCREMENTS_TO_ZERO="'liq_wat_inc','icmr_inc','rwmr_inc','snmr_inc','grle_inc'" # Write analysis files for early cycle EnKF export DO_CALC_INCREMENT_ENKF_GFS="YES" # Stratospheric increments to zero -export INCVARS_ZERO_STRAT="'sphum_inc','liq_wat_inc','icmr_inc'" +export INCVARS_ZERO_STRAT="'sphum_inc','liq_wat_inc','icmr_inc','rwmr_inc','snmr_inc','grle_inc'" export INCVARS_EFOLD="5" # Swith to generate netcdf or binary diagnostic files. If not specified, From 842adf38087aec9f1c0bca9567e4b11d494e14c7 Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Mon, 12 Feb 2024 12:50:08 -0500 Subject: [PATCH 08/16] Added additional test cases to the pr list in Jenkins (#2306) C48mx500_3DVarAOWCDA, C96C48_hybatmDA, and C96_atmsnowDA Co-authored-by: terrance.mcguinness --- Jenkinsfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6c0a0bb71f..9f3688ea6c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,7 +107,8 @@ pipeline { axes { axis { name 'Case' - values 'C48_ATM', 'C48_S2SWA_gefs', 'C48_S2SW', 'C96_atm3DVar' // TODO add dynamic list of cases from env vars (needs addtional plugins) + // TODO add dynamic list of cases from env vars (needs addtional plugins) + values 'C48_ATM', 'C48_S2SWA_gefs', 'C48_S2SW', 'C96_atm3DVar', 'C48mx500_3DVarAOWCDA', 'C96C48_hybatmDA', 'C96_atmsnowDA' } } stages { From 3f99f700c987526c8eb754b3f4c7b698b3e9b1dc Mon Sep 17 00:00:00 2001 From: Walter Kolczynski - NOAA Date: Tue, 13 Feb 2024 00:57:18 -0500 Subject: [PATCH 09/16] Add wave post jobs to GEFS (#2292) Adds the wave post jobs for gridded and points to GEFS. Boundary point jobs are added even though the current GEFS buoy file does not contain any (tested by manually subbing in the GFS buoy file). 
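The empty-buoy-list case is handled in `scripts/exgfs_wave_post_pnt.sh`; condensed from the guard added in the diff below (messages shortened, logic unchanged):

```bash
# Sketch: drop comment lines (starting with '$'), keep only IBP
# (input boundary point) entries; if none exist, as with the current
# GEFS buoy file, warn and exit cleanly instead of failing the job.
sed -n '/^\$.*/!p' buoy.loc.temp | grep IBP > buoy.loc || {
  echo "WARNING: no boundary points found in buoy file"
  exit 0
}
```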
Resolves #827 --- parm/config/gefs/config.base.emc.dyn | 2 +- parm/config/gefs/config.resources | 66 ++++++--- parm/config/gefs/config.wavepostbndpnt | 11 ++ parm/config/gefs/config.wavepostbndpntbll | 11 ++ parm/config/gefs/config.wavepostpnt | 11 ++ parm/config/gefs/config.wavepostsbs | 28 ++++ parm/config/gfs/config.wavepostbndpnt | 2 +- parm/config/gfs/config.wavepostbndpntbll | 2 +- parm/config/gfs/config.wavepostpnt | 2 +- parm/config/gfs/config.wavepostsbs | 2 +- scripts/exgfs_wave_post_pnt.sh | 6 +- workflow/applications/gefs.py | 10 +- workflow/rocoto/gefs_tasks.py | 165 +++++++++++++++++++++- 13 files changed, 284 insertions(+), 34 deletions(-) create mode 100644 parm/config/gefs/config.wavepostbndpnt create mode 100644 parm/config/gefs/config.wavepostbndpntbll create mode 100644 parm/config/gefs/config.wavepostpnt create mode 100644 parm/config/gefs/config.wavepostsbs diff --git a/parm/config/gefs/config.base.emc.dyn b/parm/config/gefs/config.base.emc.dyn index ff2fe3377b..051a2188c3 100644 --- a/parm/config/gefs/config.base.emc.dyn +++ b/parm/config/gefs/config.base.emc.dyn @@ -144,7 +144,7 @@ export DO_OCN="NO" export DO_ICE="NO" export DO_AERO="NO" export WAVE_CDUMP="" # When to include wave suite: gdas, gfs, or both -export DOBNDPNT_WAVE="NO" +export DOBNDPNT_WAVE="NO" # The GEFS buoys file does not currently have any boundary points export FRAC_GRID=".true." # Set operational resolution diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index 36b70aecb8..a4ae76d7fb 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -68,6 +68,15 @@ esac export npe_node_max case ${step} in + + "stage_ic") + export wtime_stage_ic="00:15:00" + export npe_stage_ic=1 + export npe_node_stage_ic=1 + export nth_stage_ic=1 + export is_exclusive=True + ;; + "waveinit") export wtime_waveinit="00:10:00" export npe_waveinit=12 @@ -77,25 +86,10 @@ case ${step} in export memory_waveinit="2GB" ;; - "wavepostsbs") - export wtime_wavepostsbs="00:20:00" - export wtime_wavepostsbs_gfs="03:00:00" - export npe_wavepostsbs=8 - export nth_wavepostsbs=1 - export npe_node_wavepostsbs=$(( npe_node_max / nth_wavepostsbs )) - export NTASKS=${npe_wavepostsbs} - export memory_wavepostsbs="10GB" - export memory_wavepostsbs_gfs="10GB" - ;; - "fcst" | "efcs") export is_exclusive=True - if [[ "${step}" == "fcst" ]]; then - _CDUMP_LIST=${CDUMP:-"gdas gfs"} - elif [[ "${step}" == "efcs" ]]; then - _CDUMP_LIST=${CDUMP:-"enkfgdas enkfgfs"} - fi + _CDUMP_LIST=${CDUMP:-"gdas gfs"} # During workflow creation, we need resources for all CDUMPs and CDUMP is undefined for _CDUMP in ${_CDUMP_LIST}; do @@ -224,11 +218,39 @@ case ${step} in export is_exclusive=True ;; - "stage_ic") - export wtime_stage_ic="00:15:00" - export npe_stage_ic=1 - export npe_node_stage_ic=1 - export nth_stage_ic=1 + "wavepostsbs") + export wtime_wavepostsbs="03:00:00" + export npe_wavepostsbs=1 + export nth_wavepostsbs=1 + export npe_node_wavepostsbs=$(( npe_node_max / nth_wavepostsbs )) + export NTASKS=${npe_wavepostsbs} + export memory_wavepostsbs="10GB" + ;; + + "wavepostbndpnt") + export wtime_wavepostbndpnt="01:00:00" + export npe_wavepostbndpnt=240 + export nth_wavepostbndpnt=1 + export npe_node_wavepostbndpnt=$(( npe_node_max / nth_wavepostbndpnt )) + export NTASKS=${npe_wavepostbndpnt} + export is_exclusive=True + ;; + + "wavepostbndpntbll") + export wtime_wavepostbndpntbll="01:00:00" + export npe_wavepostbndpntbll=448 + export nth_wavepostbndpntbll=1 + export 
npe_node_wavepostbndpntbll=$(( npe_node_max / nth_wavepostbndpntbll )) + export NTASKS=${npe_wavepostbndpntbll} + export is_exclusive=True + ;; + + "wavepostpnt") + export wtime_wavepostpnt="04:00:00" + export npe_wavepostpnt=200 + export nth_wavepostpnt=1 + export npe_node_wavepostpnt=$(( npe_node_max / nth_wavepostpnt )) + export NTASKS=${npe_wavepostpnt} export is_exclusive=True ;; @@ -239,4 +261,4 @@ case ${step} in esac -echo "END: config.resources" \ No newline at end of file +echo "END: config.resources" diff --git a/parm/config/gefs/config.wavepostbndpnt b/parm/config/gefs/config.wavepostbndpnt new file mode 100644 index 0000000000..412c5fb42a --- /dev/null +++ b/parm/config/gefs/config.wavepostbndpnt @@ -0,0 +1,11 @@ +#! /usr/bin/env bash + +########## config.wavepostbndpnt ########## +# Wave steps specific + +echo "BEGIN: config.wavepostbndpnt" + +# Get task specific resources +source "${EXPDIR}/config.resources" wavepostbndpnt + +echo "END: config.wavepostbndpnt" diff --git a/parm/config/gefs/config.wavepostbndpntbll b/parm/config/gefs/config.wavepostbndpntbll new file mode 100644 index 0000000000..6695ab0f84 --- /dev/null +++ b/parm/config/gefs/config.wavepostbndpntbll @@ -0,0 +1,11 @@ +#! /usr/bin/env bash + +########## config.wavepostbndpntbll ########## +# Wave steps specific + +echo "BEGIN: config.wavepostbndpntbll" + +# Get task specific resources +source "${EXPDIR}/config.resources" wavepostbndpntbll + +echo "END: config.wavepostbndpntbll" diff --git a/parm/config/gefs/config.wavepostpnt b/parm/config/gefs/config.wavepostpnt new file mode 100644 index 0000000000..e87237da82 --- /dev/null +++ b/parm/config/gefs/config.wavepostpnt @@ -0,0 +1,11 @@ +#! /usr/bin/env bash + +########## config.wavepostpnt ########## +# Wave steps specific + +echo "BEGIN: config.wavepostpnt" + +# Get task specific resources +source "${EXPDIR}/config.resources" wavepostpnt + +echo "END: config.wavepostpnt" diff --git a/parm/config/gefs/config.wavepostsbs b/parm/config/gefs/config.wavepostsbs new file mode 100644 index 0000000000..b3c5902e3c --- /dev/null +++ b/parm/config/gefs/config.wavepostsbs @@ -0,0 +1,28 @@ +#! /usr/bin/env bash + +########## config.wavepostsbs ########## +# Wave steps specific + +echo "BEGIN: config.wavepostsbs" + +# Get task specific resources +source "${EXPDIR}/config.resources" wavepostsbs + +# Subgrid info for grib2 encoding +export WAV_SUBGRBSRC="" +export WAV_SUBGRB="" + +# Options for point output (switch on/off boundary point output) +export DOIBP_WAV='NO' # Input boundary points +export DOFLD_WAV='YES' # Field data +export DOPNT_WAV='YES' # Station data +export DOGRB_WAV='YES' # Create grib2 files +if [[ -n "${waveinterpGRD}" ]]; then + export DOGRI_WAV='YES' # Create interpolated grids +else + export DOGRI_WAV='NO' # Do not create interpolated grids +fi +export DOSPC_WAV='YES' # Spectral post +export DOBLL_WAV='YES' # Bulletin post + +echo "END: config.wavepostsbs" diff --git a/parm/config/gfs/config.wavepostbndpnt b/parm/config/gfs/config.wavepostbndpnt index dfeddc79b2..412c5fb42a 100644 --- a/parm/config/gfs/config.wavepostbndpnt +++ b/parm/config/gfs/config.wavepostbndpnt @@ -6,6 +6,6 @@ echo "BEGIN: config.wavepostbndpnt" # Get task specific resources -. 
$EXPDIR/config.resources wavepostbndpnt +source "${EXPDIR}/config.resources" wavepostbndpnt echo "END: config.wavepostbndpnt" diff --git a/parm/config/gfs/config.wavepostbndpntbll b/parm/config/gfs/config.wavepostbndpntbll index bb7224cc70..6695ab0f84 100644 --- a/parm/config/gfs/config.wavepostbndpntbll +++ b/parm/config/gfs/config.wavepostbndpntbll @@ -6,6 +6,6 @@ echo "BEGIN: config.wavepostbndpntbll" # Get task specific resources -. $EXPDIR/config.resources wavepostbndpntbll +source "${EXPDIR}/config.resources" wavepostbndpntbll echo "END: config.wavepostbndpntbll" diff --git a/parm/config/gfs/config.wavepostpnt b/parm/config/gfs/config.wavepostpnt index 8befb91760..e87237da82 100644 --- a/parm/config/gfs/config.wavepostpnt +++ b/parm/config/gfs/config.wavepostpnt @@ -6,6 +6,6 @@ echo "BEGIN: config.wavepostpnt" # Get task specific resources -. $EXPDIR/config.resources wavepostpnt +source "${EXPDIR}/config.resources" wavepostpnt echo "END: config.wavepostpnt" diff --git a/parm/config/gfs/config.wavepostsbs b/parm/config/gfs/config.wavepostsbs index 8e74aae069..b3c5902e3c 100644 --- a/parm/config/gfs/config.wavepostsbs +++ b/parm/config/gfs/config.wavepostsbs @@ -6,7 +6,7 @@ echo "BEGIN: config.wavepostsbs" # Get task specific resources -. $EXPDIR/config.resources wavepostsbs +source "${EXPDIR}/config.resources" wavepostsbs # Subgrid info for grib2 encoding export WAV_SUBGRBSRC="" diff --git a/scripts/exgfs_wave_post_pnt.sh b/scripts/exgfs_wave_post_pnt.sh index a7aa957564..c085c48f30 100755 --- a/scripts/exgfs_wave_post_pnt.sh +++ b/scripts/exgfs_wave_post_pnt.sh @@ -156,7 +156,11 @@ source "$HOMEgfs/ush/preamble.sh" cp -f $PARMwave/wave_${NET}.buoys buoy.loc.temp if [ "$DOBNDPNT_WAV" = YES ]; then #only do boundary points - sed -n '/^\$.*/!p' buoy.loc.temp | grep IBP > buoy.loc + sed -n '/^\$.*/!p' buoy.loc.temp | grep IBP > buoy.loc || { + echo "WARNING: No boundary points found in buoy file ${PARMwave}/wave_${NET}.buoys" + echo " Ending job without doing anything." 
+ exit 0 + } else #exclude boundary points sed -n '/^\$.*/!p' buoy.loc.temp | grep -v IBP > buoy.loc diff --git a/workflow/applications/gefs.py b/workflow/applications/gefs.py index 9e8bb5c67e..1073397a08 100644 --- a/workflow/applications/gefs.py +++ b/workflow/applications/gefs.py @@ -20,7 +20,9 @@ def _get_app_configs(self): configs += ['efcs'] if self.do_wave: - configs += ['waveinit'] + configs += ['waveinit', 'wavepostsbs', 'wavepostpnt'] + if self.do_wave_bnd: + configs += ['wavepostbndpnt', 'wavepostbndpntbll'] return configs @@ -47,4 +49,10 @@ def get_task_names(self): tasks += ['atmprod'] + if self.do_wave: + tasks += ['wavepostsbs'] + if self.do_wave_bnd: + tasks += ['wavepostbndpnt', 'wavepostbndpntbll'] + tasks += ['wavepostpnt'] + return {f"{self._base['CDUMP']}": tasks} diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py index a72753eb90..3eb249dc76 100644 --- a/workflow/rocoto/gefs_tasks.py +++ b/workflow/rocoto/gefs_tasks.py @@ -75,7 +75,7 @@ def stage_ic(self): def waveinit(self): resources = self.get_resource('waveinit') - task_name = f'waveinit' + task_name = f'wave_init' task_dict = {'task_name': task_name, 'resources': resources, 'envars': self.envars, @@ -90,14 +90,12 @@ def waveinit(self): return task def fcst(self): - - # TODO: Add real dependencies dependencies = [] dep_dict = {'type': 'task', 'name': f'stage_ic'} dependencies.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: - dep_dict = {'type': 'task', 'name': f'waveinit'} + dep_dict = {'type': 'task', 'name': f'wave_init'} dependencies.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) @@ -124,7 +122,7 @@ def efcs(self): dependencies.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: - dep_dict = {'type': 'task', 'name': f'waveinit'} + dep_dict = {'type': 'task', 'name': f'wave_init'} dependencies.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) @@ -206,3 +204,160 @@ def atmprod(self): task = rocoto.create_task(member_metatask_dict) return task + + def wavepostsbs(self): + deps = [] + for wave_grid in self._configs['wavepostsbs']['waveGRD'].split(): + wave_hist_path = self._template_to_rocoto_cycstring(self._base["COM_WAVE_HISTORY_TMPL"], {'MEMDIR': 'mem#member#'}) + data = f'{wave_hist_path}/gefswave.out_grd.{wave_grid}.@Y@m@d.@H0000' + dep_dict = {'type': 'data', 'data': data} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) + + wave_post_envars = self.envars.copy() + postenvar_dict = {'ENSMEM': '#member#', + 'MEMDIR': 'mem#member#', + } + for key, value in postenvar_dict.items(): + wave_post_envars.append(rocoto.create_envar(name=key, value=str(value))) + + resources = self.get_resource('wavepostsbs') + + task_name = f'wave_post_grid_mem#member#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': wave_post_envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostsbs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(0, self.nmem + 1)])} + member_metatask_dict = {'task_name': 'wave_post_grid', + 'task_dict': task_dict, + 'var_dict': member_var_dict + } + + task = rocoto.create_task(member_metatask_dict) + 
+ return task + + def wavepostbndpnt(self): + deps = [] + dep_dict = {'type': 'task', 'name': f'fcst_mem#member#'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps) + + wave_post_bndpnt_envars = self.envars.copy() + postenvar_dict = {'ENSMEM': '#member#', + 'MEMDIR': 'mem#member#', + } + for key, value in postenvar_dict.items(): + wave_post_bndpnt_envars.append(rocoto.create_envar(name=key, value=str(value))) + + resources = self.get_resource('wavepostbndpnt') + task_name = f'wave_post_bndpnt_mem#member#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': wave_post_bndpnt_envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpnt.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(0, self.nmem + 1)])} + member_metatask_dict = {'task_name': 'wave_post_bndpnt', + 'task_dict': task_dict, + 'var_dict': member_var_dict + } + + task = rocoto.create_task(member_metatask_dict) + + return task + + def wavepostbndpntbll(self): + deps = [] + atmos_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"], {'MEMDIR': 'mem#member#'}) + # Is there any reason this is 180? + data = f'{atmos_hist_path}/{self.cdump}.t@Hz.atm.logf180.txt' + dep_dict = {'type': 'data', 'data': data} + deps.append(rocoto.add_dependency(dep_dict)) + + dep_dict = {'type': 'task', 'name': f'fcst_mem#member#'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) + + wave_post_bndpnt_bull_envars = self.envars.copy() + postenvar_dict = {'ENSMEM': '#member#', + 'MEMDIR': 'mem#member#', + } + for key, value in postenvar_dict.items(): + wave_post_bndpnt_bull_envars.append(rocoto.create_envar(name=key, value=str(value))) + + resources = self.get_resource('wavepostbndpntbll') + task_name = f'wave_post_bndpnt_bull_mem#member#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': wave_post_bndpnt_bull_envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpntbll.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(0, self.nmem + 1)])} + member_metatask_dict = {'task_name': 'wave_post_bndpnt_bull', + 'task_dict': task_dict, + 'var_dict': member_var_dict + } + + task = rocoto.create_task(member_metatask_dict) + + return task + + def wavepostpnt(self): + deps = [] + dep_dict = {'type': 'task', 'name': f'fcst_mem#member#'} + deps.append(rocoto.add_dependency(dep_dict)) + if self.app_config.do_wave_bnd: + dep_dict = {'type': 'task', 'name': f'wave_post_bndpnt_bull_mem#member#'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) + + wave_post_pnt_envars = self.envars.copy() + postenvar_dict = {'ENSMEM': '#member#', + 'MEMDIR': 'mem#member#', + } + for key, value in postenvar_dict.items(): + wave_post_pnt_envars.append(rocoto.create_envar(name=key, value=str(value))) + + resources = self.get_resource('wavepostpnt') + task_name = f'wave_post_pnt_mem#member#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 
'envars': wave_post_pnt_envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostpnt.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(0, self.nmem + 1)])} + member_metatask_dict = {'task_name': 'wave_post_pnt', + 'task_dict': task_dict, + 'var_dict': member_var_dict + } + + task = rocoto.create_task(member_metatask_dict) + + return task
From 64048926627f8c9edb087de286095e3b93a214c2 Mon Sep 17 00:00:00 2001
From: Rahul Mahajan
Date: Tue, 13 Feb 2024 14:57:37 -0500
Subject: [PATCH 10/16] Ocean/ice product generation for GFS and GEFS (#2286)

This PR does several things:
1. The model output for ocean and ice in the `COM/` directory is now named per the EE2 convention for the coupled model, e.g. `gfs.ocean.t12z.6hr_avg.f120.nc` and `gfs.ocean.t12z.daily.f120.nc`.
2. The products are generated using the `ocnicepost.fd` utility developed by @DeniseWorthen in https://github.com/NOAA-EMC/gfs-utils and converted to grib2 with `wgrib2`, following example scripts provided by @GwenChen-NOAA.
3. NetCDF products on the native grid are also generated by subsetting variables from the raw model output. This is done with `xarray` (see the illustrative sketch further below).
4. Updates the hash of https://github.com/NOAA-EMC/gfs-utils to include fixes in `ocnicepost.fd`.
5. Removes the NCL-related scripting previously used for ocean/ice interpolation, and `reg2grb2`, which was used for converting to grib2.
6. Updates the archive scripts to accommodate the updated file names.
7. Removes intermediate processed ocean files such as the 2D/3D/xsect datasets.
8. Adds separate jobs for ocean and ice product generation.
9. Removes intermediate restarts for the mediator and only saves the mediator restart at the end of the forecast in `COM`.
10. Increases memory for offline UPP when run at C768. The program segfaults with an OOM error when memory is self-allocated based on PEs by the scheduler on Hera.
11. Enables ocean/ice ensemble product generation for GEFS.
12.
Some minor clean-ups Fixes #935 Fixes #1317 Fixes #1864 --- .gitignore | 6 +- env/HERA.env | 7 + env/HERCULES.env | 10 + env/JET.env | 7 + env/ORION.env | 7 + env/S4.env | 7 + env/WCOSS2.env | 7 + jobs/JGLOBAL_ARCHIVE | 5 +- jobs/JGLOBAL_OCEANICE_PRODUCTS | 40 ++ jobs/rocoto/oceanice_products.sh | 37 ++ jobs/rocoto/ocnpost.sh | 119 ----- modulefiles/module_base.hera.lua | 1 + modulefiles/module_base.hercules.lua | 1 + modulefiles/module_base.jet.lua | 1 + modulefiles/module_base.orion.lua | 1 + modulefiles/module_base.s4.lua | 1 + parm/config/gefs/config.oceanice_products | 1 + parm/config/gefs/config.resources | 10 +- parm/config/gfs/config.com | 7 +- parm/config/gfs/config.oceanice_products | 15 + parm/config/gfs/config.ocnpost | 29 - parm/config/gfs/config.resources | 20 +- parm/post/oceanice_products.yaml | 75 +++ scripts/exglobal_archive.sh | 8 +- scripts/exglobal_forecast.sh | 4 + scripts/exglobal_oceanice_products.py | 52 ++ scripts/run_reg2grb2.sh | 72 --- scripts/run_regrid.sh | 27 - sorc/gfs_utils.fd | 2 +- sorc/link_workflow.sh | 15 +- sorc/ncl.setup | 12 - ush/forecast_det.sh | 2 +- ush/forecast_postdet.sh | 257 ++++----- ush/forecast_predet.sh | 112 ++-- ush/hpssarch_gen.sh | 77 +-- ush/icepost.ncl | 382 ------------- ush/oceanice_nc2grib2.sh | 319 +++++++++++ ush/ocnpost.ncl | 588 --------------------- ush/parsing_ufs_configure.sh | 20 +- ush/python/pygfs/task/oceanice_products.py | 337 ++++++++++++ versions/run.spack.ver | 1 + workflow/applications/gefs.py | 11 +- workflow/applications/gfs_cycled.py | 14 +- workflow/applications/gfs_forecast_only.py | 11 +- workflow/rocoto/gefs_tasks.py | 74 ++- workflow/rocoto/gfs_tasks.py | 138 +++-- workflow/rocoto/tasks.py | 6 +- 47 files changed, 1356 insertions(+), 1599 deletions(-) create mode 100755 jobs/JGLOBAL_OCEANICE_PRODUCTS create mode 100755 jobs/rocoto/oceanice_products.sh delete mode 100755 jobs/rocoto/ocnpost.sh create mode 120000 parm/config/gefs/config.oceanice_products create mode 100644 parm/config/gfs/config.oceanice_products delete mode 100644 parm/config/gfs/config.ocnpost create mode 100644 parm/post/oceanice_products.yaml create mode 100755 scripts/exglobal_oceanice_products.py delete mode 100755 scripts/run_reg2grb2.sh delete mode 100755 scripts/run_regrid.sh delete mode 100644 sorc/ncl.setup delete mode 100755 ush/icepost.ncl create mode 100755 ush/oceanice_nc2grib2.sh delete mode 100755 ush/ocnpost.ncl create mode 100644 ush/python/pygfs/task/oceanice_products.py diff --git a/.gitignore b/.gitignore index 869c78107c..2935804ac4 100644 --- a/.gitignore +++ b/.gitignore @@ -36,7 +36,6 @@ fix/gsi fix/lut fix/mom6 fix/orog -fix/reg2grb2 fix/sfc_climo fix/ugwd fix/verif @@ -99,6 +98,9 @@ parm/post/postxconfig-NT-GFS-WAFS.txt parm/post/postxconfig-NT-GFS.txt parm/post/postxconfig-NT-gefs-aerosol.txt parm/post/postxconfig-NT-gefs-chem.txt +parm/post/ocean.csv +parm/post/ice.csv +parm/post/ocnicepost.nml.jinja2 parm/ufs/noahmptable.tbl parm/ufs/model_configure.IN parm/ufs/MOM_input_*.IN @@ -137,7 +139,6 @@ sorc/radmon_bcor.fd sorc/radmon_time.fd sorc/rdbfmsua.fd sorc/recentersigp.fd -sorc/reg2grb2.fd sorc/supvit.fd sorc/syndat_getjtbul.fd sorc/syndat_maksynrc.fd @@ -147,6 +148,7 @@ sorc/tocsbufr.fd sorc/upp.fd sorc/vint.fd sorc/webtitle.fd +sorc/ocnicepost.fd # Ignore scripts from externals #------------------------------ diff --git a/env/HERA.env b/env/HERA.env index 057a2313f8..e9a0ee050f 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -199,6 +199,13 @@ elif [[ "${step}" = "atmos_products" ]]; then export 
USE_CFP="YES" # Use MPMD for downstream product generation on Hera +elif [[ "${step}" = "oceanice_products" ]]; then + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" + elif [[ "${step}" = "ecen" ]]; then nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/env/HERCULES.env b/env/HERCULES.env index ebfa51398b..396e587798 100755 --- a/env/HERCULES.env +++ b/env/HERCULES.env @@ -207,10 +207,20 @@ case ${step} in [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}" ;; + "atmos_products") export USE_CFP="YES" # Use MPMD for downstream product generation ;; + +"oceanice_products") + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" +;; + "ecen") nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/env/JET.env b/env/JET.env index eada0b1c70..02e11950e5 100755 --- a/env/JET.env +++ b/env/JET.env @@ -190,6 +190,13 @@ elif [[ "${step}" = "atmos_products" ]]; then export USE_CFP="YES" # Use MPMD for downstream product generation +elif [[ "${step}" = "oceanice_products" ]]; then + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" + elif [[ "${step}" = "ecen" ]]; then nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/env/ORION.env b/env/ORION.env index c5e94cc559..f0a97eb933 100755 --- a/env/ORION.env +++ b/env/ORION.env @@ -209,6 +209,13 @@ elif [[ "${step}" = "atmos_products" ]]; then export USE_CFP="YES" # Use MPMD for downstream product generation +elif [[ "${step}" = "oceanice_products" ]]; then + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" + elif [[ "${step}" = "ecen" ]]; then nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/env/S4.env b/env/S4.env index b103e865d3..717d971c7d 100755 --- a/env/S4.env +++ b/env/S4.env @@ -177,6 +177,13 @@ elif [[ "${step}" = "atmos_products" ]]; then export USE_CFP="YES" # Use MPMD for downstream product generation +elif [[ "${step}" = "oceanice_products" ]]; then + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" + elif [[ "${step}" = "ecen" ]]; then nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/env/WCOSS2.env b/env/WCOSS2.env index 307ad71c43..bbf4de2ae3 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -193,6 +193,13 @@ elif [[ "${step}" = "atmos_products" ]]; then export USE_CFP="YES" # Use MPMD for downstream product generation +elif [[ "${step}" = "oceanice_products" ]]; then + + nth_max=$((npe_node_max / npe_node_oceanice_products)) + + export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1} + export APRUN_OCNICEPOST="${launcher} -n 1 -ppn ${npe_node_oceanice_products} --cpu-bind depth --depth ${NTHREADS_OCNICEPOST}" + elif [[ "${step}" = "ecen" ]]; then nth_max=$((npe_node_max / npe_node_ecen)) diff --git a/jobs/JGLOBAL_ARCHIVE b/jobs/JGLOBAL_ARCHIVE index 66f6dfa8dc..235084e631 100755 --- 
a/jobs/JGLOBAL_ARCHIVE +++ b/jobs/JGLOBAL_ARCHIVE @@ -14,16 +14,15 @@ YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_ANALYSIS COM_ATMOS_BUFR COM_ATMO COM_ATMOS_TRACK COM_ATMOS_WMO \ COM_CHEM_HISTORY COM_CHEM_ANALYSIS\ COM_MED_RESTART \ - COM_ICE_HISTORY COM_ICE_INPUT COM_ICE_RESTART \ + COM_ICE_HISTORY COM_ICE_INPUT COM_ICE_RESTART COM_ICE_GRIB \ COM_OBS COM_TOP \ - COM_OCEAN_HISTORY COM_OCEAN_INPUT COM_OCEAN_RESTART COM_OCEAN_XSECT COM_OCEAN_2D COM_OCEAN_3D \ + COM_OCEAN_HISTORY COM_OCEAN_INPUT COM_OCEAN_RESTART COM_OCEAN_GRIB COM_OCEAN_NETCDF \ COM_OCEAN_ANALYSIS \ COM_WAVE_GRID COM_WAVE_HISTORY COM_WAVE_STATION \ COM_ATMOS_OZNMON COM_ATMOS_RADMON COM_ATMOS_MINMON COM_CONF for grid in "0p25" "0p50" "1p00"; do YMD=${PDY} HH=${cyc} GRID=${grid} generate_com -rx "COM_ATMOS_GRIB_${grid}:COM_ATMOS_GRIB_GRID_TMPL" - YMD=${PDY} HH=${cyc} GRID=${grid} generate_com -rx "COM_OCEAN_GRIB_${grid}:COM_OCEAN_GRIB_GRID_TMPL" done ############################################################### diff --git a/jobs/JGLOBAL_OCEANICE_PRODUCTS b/jobs/JGLOBAL_OCEANICE_PRODUCTS new file mode 100755 index 0000000000..1d8c6b42c6 --- /dev/null +++ b/jobs/JGLOBAL_OCEANICE_PRODUCTS @@ -0,0 +1,40 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" +source "${HOMEgfs}/ush/jjob_header.sh" -e "oceanice_products" -c "base oceanice_products" + + +############################################## +# Begin JOB SPECIFIC work +############################################## + +# Construct COM variables from templates +YMD="${PDY}" HH="${cyc}" generate_com -rx "COM_${COMPONENT^^}_HISTORY" +YMD="${PDY}" HH="${cyc}" generate_com -rx "COM_${COMPONENT^^}_GRIB" +YMD="${PDY}" HH="${cyc}" generate_com -rx "COM_${COMPONENT^^}_NETCDF" + +############################################################### +# Run exglobal script +"${HOMEgfs}/scripts/exglobal_oceanice_products.py" +status=$? +(( status != 0 )) && exit "${status}" + +############################################## +# End JOB SPECIFIC work +############################################## + +############################################## +# Final processing +############################################## +if [[ -e "${pgmout}" ]]; then + cat "${pgmout}" +fi + +########################################## +# Remove the Temporary working directory +########################################## +cd "${DATAROOT}" || exit 1 +[[ "${KEEPDATA:-NO}" == "NO" ]] && rm -rf "${DATA}" + + +exit 0 diff --git a/jobs/rocoto/oceanice_products.sh b/jobs/rocoto/oceanice_products.sh new file mode 100755 index 0000000000..48816fb3a1 --- /dev/null +++ b/jobs/rocoto/oceanice_products.sh @@ -0,0 +1,37 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +############################################################### +## ocean ice products driver script +## FHRLST : forecast hour list to post-process (e.g. f000, f000_f001_f002, ...) +############################################################### + +# Source FV3GFS workflow modules +. "${HOMEgfs}/ush/load_fv3gfs_modules.sh" +status=$? 
+if (( status != 0 )); then exit "${status}"; fi + +############################################################### +# setup python path for workflow utilities and tasks +wxflowPATH="${HOMEgfs}/ush/python:${HOMEgfs}/ush/python/wxflow/src" +PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${wxflowPATH}" +export PYTHONPATH + +export job="oceanice_products" +export jobid="${job}.$$" + +############################################################### +# shellcheck disable=SC2153,SC2001 +IFS='_' read -ra fhrs <<< "${FHRLST//f}" # strip off the 'f's and convert to array + +#--------------------------------------------------------------- +# Execute the JJOB +for fhr in "${fhrs[@]}"; do + export FORECAST_HOUR=$(( 10#${fhr} )) + "${HOMEgfs}/jobs/JGLOBAL_OCEANICE_PRODUCTS" + status=$? + if (( status != 0 )); then exit "${status}"; fi +done + +exit 0 diff --git a/jobs/rocoto/ocnpost.sh b/jobs/rocoto/ocnpost.sh deleted file mode 100755 index 5a2dc091cf..0000000000 --- a/jobs/rocoto/ocnpost.sh +++ /dev/null @@ -1,119 +0,0 @@ -#! /usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" - -############################################################### -## CICE5/MOM6 post driver script -## FHRGRP : forecast hour group to post-process (e.g. 0, 1, 2 ...) -## FHRLST : forecast hourlist to be post-process (e.g. anl, f000, f000_f001_f002, ...) -############################################################### - -# Source FV3GFS workflow modules -source "${HOMEgfs}/ush/load_fv3gfs_modules.sh" -status=$? -(( status != 0 )) && exit "${status}" - -export job="ocnpost" -export jobid="${job}.$$" -source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnpost" -c "base ocnpost" - -############################################## -# Set variables used in the exglobal script -############################################## -export CDUMP=${RUN/enkf} - -############################################## -# Begin JOB SPECIFIC work -############################################## -YMD=${PDY} HH=${cyc} generate_com -rx COM_OCEAN_HISTORY COM_OCEAN_2D COM_OCEAN_3D \ - COM_OCEAN_XSECT COM_ICE_HISTORY - -for grid in "0p50" "0p25"; do - YMD=${PDY} HH=${cyc} GRID=${grid} generate_com -rx "COM_OCEAN_GRIB_${grid}:COM_OCEAN_GRIB_GRID_TMPL" -done - -for outdir in COM_OCEAN_2D COM_OCEAN_3D COM_OCEAN_XSECT COM_OCEAN_GRIB_0p25 COM_OCEAN_GRIB_0p50; do - if [[ ! -d "${!outdir}" ]]; then - mkdir -p "${!outdir}" - fi -done - -fhrlst=$(echo ${FHRLST} | sed -e 's/_/ /g; s/f/ /g; s/,/ /g') - -export OMP_NUM_THREADS=1 -export ENSMEM=${ENSMEM:-000} - -export IDATE=${PDY}${cyc} - -for fhr in ${fhrlst}; do - export fhr=${fhr} - # Ignore possible spelling error (nothing is misspelled) - # shellcheck disable=SC2153 - VDATE=$(${NDATE} "${fhr}" "${IDATE}") - # shellcheck disable= - declare -x VDATE - cd "${DATA}" || exit 2 - if (( 10#${fhr} > 0 )); then - # TODO: This portion calls NCL scripts that are deprecated (see Issue #923) - if [[ "${MAKE_OCN_GRIB:-YES}" == "YES" ]]; then - export MOM6REGRID=${MOM6REGRID:-${HOMEgfs}} - "${MOM6REGRID}/scripts/run_regrid.sh" - status=$? - [[ ${status} -ne 0 ]] && exit "${status}" - - # Convert the netcdf files to grib2 - export executable=${MOM6REGRID}/exec/reg2grb2.x - "${MOM6REGRID}/scripts/run_reg2grb2.sh" - status=$? 
- [[ ${status} -ne 0 ]] && exit "${status}" - ${NMV} "ocn_ice${VDATE}.${ENSMEM}.${IDATE}_0p25x0p25.grb2" "${COM_OCEAN_GRIB_0p25}/" - ${NMV} "ocn_ice${VDATE}.${ENSMEM}.${IDATE}_0p5x0p5.grb2" "${COM_OCEAN_GRIB_0p50}/" - fi - - #break up ocn netcdf into multiple files: - if [[ -f "${COM_OCEAN_2D}/ocn_2D_${VDATE}.${ENSMEM}.${IDATE}.nc" ]]; then - echo "File ${COM_OCEAN_2D}/ocn_2D_${VDATE}.${ENSMEM}.${IDATE}.nc already exists" - else - ncks -x -v vo,uo,so,temp \ - "${COM_OCEAN_HISTORY}/ocn${VDATE}.${ENSMEM}.${IDATE}.nc" \ - "${COM_OCEAN_2D}/ocn_2D_${VDATE}.${ENSMEM}.${IDATE}.nc" - status=$? - [[ ${status} -ne 0 ]] && exit "${status}" - fi - if [[ -f "${COM_OCEAN_3D}/ocn_3D_${VDATE}.${ENSMEM}.${IDATE}.nc" ]]; then - echo "File ${COM_OCEAN_3D}/ocn_3D_${VDATE}.${ENSMEM}.${IDATE}.nc already exists" - else - ncks -x -v Heat_PmE,LW,LwLatSens,MLD_003,MLD_0125,SSH,SSS,SST,SSU,SSV,SW,cos_rot,ePBL,evap,fprec,frazil,latent,lprec,lrunoff,sensible,sin_rot,speed,taux,tauy,wet_c,wet_u,wet_v \ - "${COM_OCEAN_HISTORY}/ocn${VDATE}.${ENSMEM}.${IDATE}.nc" \ - "${COM_OCEAN_3D}/ocn_3D_${VDATE}.${ENSMEM}.${IDATE}.nc" - status=$? - [[ ${status} -ne 0 ]] && exit "${status}" - fi - if [[ -f "${COM_OCEAN_XSECT}/ocn-temp-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc" ]]; then - echo "File ${COM_OCEAN_XSECT}/ocn-temp-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc already exists" - else - ncks -v temp -d yh,0.0 \ - "${COM_OCEAN_3D}/ocn_3D_${VDATE}.${ENSMEM}.${IDATE}.nc" \ - "${COM_OCEAN_XSECT}/ocn-temp-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc" - status=$? - [[ ${status} -ne 0 ]] && exit "${status}" - fi - if [[ -f "${COM_OCEAN_XSECT}/ocn-uo-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc" ]]; then - echo "File ${COM_OCEAN_XSECT}/ocn-uo-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc already exists" - else - ncks -v uo -d yh,0.0 \ - "${COM_OCEAN_3D}/ocn_3D_${VDATE}.${ENSMEM}.${IDATE}.nc" \ - "${COM_OCEAN_XSECT}/ocn-uo-EQ_${VDATE}.${ENSMEM}.${IDATE}.nc" - status=$? 
- [[ ${status} -ne 0 ]] && exit "${status}" - fi - fi -done - -# clean up working folder -if [[ ${KEEPDATA:-"NO"} = "NO" ]] ; then rm -rf "${DATA}" ; fi -############################################################### -# Exit out cleanly - - -exit 0 diff --git a/modulefiles/module_base.hera.lua b/modulefiles/module_base.hera.lua index 311fb0a1cf..31df384298 100644 --- a/modulefiles/module_base.hera.lua +++ b/modulefiles/module_base.hera.lua @@ -33,6 +33,7 @@ load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) +load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None"))) -- MET/METplus are not available for use with spack-stack, yet --load(pathJoin("met", (os.getenv("met_ver") or "None"))) diff --git a/modulefiles/module_base.hercules.lua b/modulefiles/module_base.hercules.lua index d9c8f5ed0b..6f2b94dbda 100644 --- a/modulefiles/module_base.hercules.lua +++ b/modulefiles/module_base.hercules.lua @@ -35,6 +35,7 @@ load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) +load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) diff --git a/modulefiles/module_base.jet.lua b/modulefiles/module_base.jet.lua index 64d35da57a..17365c0c39 100644 --- a/modulefiles/module_base.jet.lua +++ b/modulefiles/module_base.jet.lua @@ -33,6 +33,7 @@ load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) +load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) diff --git a/modulefiles/module_base.orion.lua b/modulefiles/module_base.orion.lua index 65486855d0..e3037b9bb7 100644 --- a/modulefiles/module_base.orion.lua +++ b/modulefiles/module_base.orion.lua @@ -31,6 +31,7 @@ load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) +load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None"))) -- MET/METplus are not yet supported with spack-stack --load(pathJoin("met", (os.getenv("met_ver") or "None"))) diff --git a/modulefiles/module_base.s4.lua b/modulefiles/module_base.s4.lua index d99a93c3f4..59d618e038 100644 --- a/modulefiles/module_base.s4.lua +++ b/modulefiles/module_base.s4.lua @@ -30,6 +30,7 @@ load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) +load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) diff --git a/parm/config/gefs/config.oceanice_products 
b/parm/config/gefs/config.oceanice_products new file mode 120000 index 0000000000..f6cf9cd60b --- /dev/null +++ b/parm/config/gefs/config.oceanice_products @@ -0,0 +1 @@ +../gfs/config.oceanice_products \ No newline at end of file diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index a4ae76d7fb..18750d1192 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -19,7 +19,7 @@ if (( $# != 1 )); then echo "tracker genesis genesis_fsu" echo "verfozn verfrad vminmon fit2obs metp arch cleanup" echo "eobs ediag eomg eupd ecen esfc efcs epos earc" - echo "init_chem mom6ic ocnpost" + echo "init_chem mom6ic" echo "waveinit waveprep wavepostsbs wavepostbndpnt wavepostbndpntbll wavepostpnt" echo "wavegempak waveawipsbulls waveawipsgridded" echo "postsnd awips gempak npoess" @@ -218,6 +218,14 @@ case ${step} in export is_exclusive=True ;; + "oceanice_products") + export wtime_oceanice_products="00:15:00" + export npe_oceanice_products=1 + export npe_node_oceanice_products=1 + export nth_oceanice_products=1 + export memory_oceanice_products="96GB" + ;; + "wavepostsbs") export wtime_wavepostsbs="03:00:00" export npe_wavepostsbs=1 diff --git a/parm/config/gfs/config.com b/parm/config/gfs/config.com index db648b5866..1f046fdef6 100644 --- a/parm/config/gfs/config.com +++ b/parm/config/gfs/config.com @@ -80,15 +80,16 @@ declare -rx COM_OCEAN_HISTORY_TMPL=${COM_BASE}'/model_data/ocean/history' declare -rx COM_OCEAN_RESTART_TMPL=${COM_BASE}'/model_data/ocean/restart' declare -rx COM_OCEAN_INPUT_TMPL=${COM_BASE}'/model_data/ocean/input' declare -rx COM_OCEAN_ANALYSIS_TMPL=${COM_BASE}'/analysis/ocean' -declare -rx COM_OCEAN_2D_TMPL=${COM_BASE}'/products/ocean/2D' -declare -rx COM_OCEAN_3D_TMPL=${COM_BASE}'/products/ocean/3D' -declare -rx COM_OCEAN_XSECT_TMPL=${COM_BASE}'/products/ocean/xsect' +declare -rx COM_OCEAN_NETCDF_TMPL=${COM_BASE}'/products/ocean/netcdf' declare -rx COM_OCEAN_GRIB_TMPL=${COM_BASE}'/products/ocean/grib2' declare -rx COM_OCEAN_GRIB_GRID_TMPL=${COM_OCEAN_GRIB_TMPL}'/${GRID}' declare -rx COM_ICE_INPUT_TMPL=${COM_BASE}'/model_data/ice/input' declare -rx COM_ICE_HISTORY_TMPL=${COM_BASE}'/model_data/ice/history' declare -rx COM_ICE_RESTART_TMPL=${COM_BASE}'/model_data/ice/restart' +declare -rx COM_ICE_NETCDF_TMPL=${COM_BASE}'/products/ice/netcdf' +declare -rx COM_ICE_GRIB_TMPL=${COM_BASE}'/products/ice/grib2' +declare -rx COM_ICE_GRIB_GRID_TMPL=${COM_ICE_GRIB_TMPL}'/${GRID}' declare -rx COM_CHEM_HISTORY_TMPL=${COM_BASE}'/model_data/chem/history' declare -rx COM_CHEM_ANALYSIS_TMPL=${COM_BASE}'/analysis/chem' diff --git a/parm/config/gfs/config.oceanice_products b/parm/config/gfs/config.oceanice_products new file mode 100644 index 0000000000..bea70c21cc --- /dev/null +++ b/parm/config/gfs/config.oceanice_products @@ -0,0 +1,15 @@ +#! /usr/bin/env bash + +########## config.oceanice_products ########## + +echo "BEGIN: config.oceanice_products" + +# Get task specific resources +source "${EXPDIR}/config.resources" oceanice_products + +export OCEANICEPRODUCTS_CONFIG="${HOMEgfs}/parm/post/oceanice_products.yaml" + +# No. of forecast hours to process in a single job +export NFHRS_PER_GROUP=3 + +echo "END: config.oceanice_products" diff --git a/parm/config/gfs/config.ocnpost b/parm/config/gfs/config.ocnpost deleted file mode 100644 index 851c476e6c..0000000000 --- a/parm/config/gfs/config.ocnpost +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env bash - -########## config.ocnpost ########## - -echo "BEGIN: config.ocnpost" - -# Get task specific resources -source "${EXPDIR}/config.resources" ocnpost - -# Convert netcdf files to grib files using post job -#------------------------------------------- -case "${OCNRES}" in - "025") export MAKE_OCN_GRIB="YES";; - "050") export MAKE_OCN_GRIB="NO";; - "100") export MAKE_OCN_GRIB="NO";; - "500") export MAKE_OCN_GRIB="NO";; - *) export MAKE_OCN_GRIB="NO";; -esac - -if [[ "${machine}" = "WCOSS2" ]] || [[ "${machine}" = "HERCULES" ]]; then - #Currently the conversion to netcdf uses NCL which is not on WCOSS2 or HERCULES - #This should be removed when this is updated - export MAKE_OCN_GRIB="NO" -fi - -# No. of forecast hours to process in a single job -export NFHRS_PER_GROUP=3 - -echo "END: config.ocnpost" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index b746a4b32a..ced6e6a3d8 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -19,7 +19,7 @@ if (( $# != 1 )); then echo "tracker genesis genesis_fsu" echo "verfozn verfrad vminmon fit2obs metp arch cleanup" echo "eobs ediag eomg eupd ecen esfc efcs epos earc" - echo "init_chem mom6ic ocnpost" + echo "init_chem mom6ic oceanice_products" echo "waveinit waveprep wavepostsbs wavepostbndpnt wavepostbndpntbll wavepostpnt" echo "wavegempak waveawipsbulls waveawipsgridded" echo "postsnd awips gempak npoess" @@ -651,17 +651,12 @@ case ${step} in unset NTASKS_TOT ;; - "ocnpost") - export wtime_ocnpost="00:30:00" - export npe_ocnpost=1 - export npe_node_ocnpost=1 - export nth_ocnpost=1 - export memory_ocnpost="96G" - if [[ ${machine} == "JET" ]]; then - # JET only has 88GB of requestable memory per node - # so a second node is required to meet the requiremtn - npe_ocnpost=2 - fi + "oceanice_products") + export wtime_oceanice_products="00:15:00" + export npe_oceanice_products=1 + export npe_node_oceanice_products=1 + export nth_oceanice_products=1 + export memory_oceanice_products="96GB" ;; "upp") @@ -671,6 +666,7 @@ case ${step} in ;; "C192" | "C384" | "C768") export npe_upp=120 + export memory_upp="48GB" ;; *) echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}" diff --git a/parm/post/oceanice_products.yaml b/parm/post/oceanice_products.yaml new file mode 100644 index 0000000000..44b4094c56 --- /dev/null +++ b/parm/post/oceanice_products.yaml @@ -0,0 +1,75 @@ +ocnicepost: + executable: "ocnicepost.x" + namelist: + debug: False + fix_data: + mkdir: + - "{{ DATA }}" + copy: + - ["{{ HOMEgfs }}/exec/ocnicepost.x", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/parm/post/ocnicepost.nml.jinja2", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/parm/post/{{ component }}.csv", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/fix/mom6/post/{{ model_grid }}/tripole.{{ model_grid }}.Bu.to.Ct.bilinear.nc", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/fix/mom6/post/{{ model_grid }}/tripole.{{ model_grid }}.Cu.to.Ct.bilinear.nc", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/fix/mom6/post/{{ model_grid }}/tripole.{{ model_grid }}.Cv.to.Ct.bilinear.nc", "{{ DATA }}/"] + {% for grid in product_grids %} + - ["{{ HOMEgfs }}/fix/mom6/post/{{ model_grid }}/tripole.{{ model_grid }}.Ct.to.rect.{{ grid }}.bilinear.nc", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/fix/mom6/post/{{ model_grid }}/tripole.{{ model_grid }}.Ct.to.rect.{{ grid }}.conserve.nc", "{{ DATA }}/"] + - ["{{ HOMEgfs }}/fix/mom6/post/template.global.{{ grid }}.gb2", "{{ DATA }}/"] + {% endfor %} + +nc2grib2: + script: "{{ HOMEgfs }}/ush/oceanice_nc2grib2.sh" + +ocean: 
+ namelist: + ftype: "ocean" + maskvar: "temp" + sinvar: "sin_rot" + cosvar: "cos_rot" + angvar: "" + {% if model_grid == 'mx025' or model_grid == 'mx050' or model_grid == 'mx100' %} + ocean_levels: [5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 105, 115, 125, 135, 145, 155, 165, 175, 185, 195, 205, 215, 225.86945, 241.06255, 266.5239, 308.7874, 373.9288, 467.3998, 593.87915, 757.1453, 959.97325, 1204.059, 1489.9735, 1817.1455, 2183.879, 2587.3995, 3023.9285, 3488.7875, 3976.524, 4481.0625] + {% elif model_grid == 'mx500' %} + ocean_levels: [5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 105, 115, 125, 135, 145, 155, 165, 175, 185, 195, 205, 215, 225.86945, 241.06255, 266.5239] + {% endif %} + subset: ['SSH', 'SST', 'SSS', 'speed', 'MLD_003', 'latent', 'sensible', 'SW', 'LW', 'LwLatSens', 'Heat_PmE', 'SSU', 'SSV', 'taux', 'tauy', 'temp', 'so', 'uo', 'vo'] + data_in: + copy: + - ["{{ COM_OCEAN_HISTORY }}/{{ RUN }}.ocean.t{{ current_cycle | strftime('%H') }}z.6hr_avg.f{{ '%03d' % forecast_hour }}.nc", "{{ DATA }}/ocean.nc"] + data_out: + mkdir: + - "{{ COM_OCEAN_NETCDF }}" + {% for grid in product_grids %} + - "{{ COM_OCEAN_GRIB }}/{{ grid }}" + {% endfor %} + copy: + - ["{{ DATA }}/ocean_subset.nc", "{{ COM_OCEAN_NETCDF }}/{{ RUN }}.ocean.t{{ current_cycle | strftime('%H') }}z.native.f{{ '%03d' % forecast_hour }}.nc"] + {% for grid in product_grids %} + - ["{{ DATA }}/ocean.{{ grid }}.grib2", "{{ COM_OCEAN_GRIB }}/{{ grid }}/{{ RUN }}.ocean.t{{ current_cycle | strftime('%H') }}z.{{ grid }}.f{{ '%03d' % forecast_hour }}.grib2"] + - ["{{ DATA }}/ocean.{{ grid }}.grib2.idx", "{{ COM_OCEAN_GRIB }}/{{ grid }}/{{ RUN }}.ocean.t{{ current_cycle | strftime('%H') }}z.{{ grid }}.f{{ '%03d' % forecast_hour }}.grib2.idx"] + {% endfor %} + +ice: + namelist: + ftype: "ice" + maskvar: "tmask" + sinvar: "" + cosvar: "" + angvar: "ANGLET" + subset: ['hi_h', 'hs_h', 'aice_h', 'Tsfc_h', 'uvel_h', 'vvel_h', 'frzmlt_h', 'albsni_h', 'mlt_onset_h', 'frz_onset_h'] + data_in: + copy: + - ["{{ COM_ICE_HISTORY }}/{{ RUN }}.ice.t{{ current_cycle | strftime('%H') }}z.6hr_avg.f{{ '%03d' % forecast_hour }}.nc", "{{ DATA }}/ice.nc"] + data_out: + mkdir: + - "{{ COM_ICE_NETCDF }}" + {% for grid in product_grids %} + - "{{ COM_ICE_GRIB }}/{{ grid }}" + {% endfor %} + copy: + - ["{{ DATA }}/ice_subset.nc", "{{ COM_ICE_NETCDF }}/{{ RUN }}.ice.t{{ current_cycle | strftime('%H') }}z.native.f{{ '%03d' % forecast_hour }}.nc"] + {% for grid in product_grids %} + - ["{{ DATA }}/ice.{{ grid }}.grib2", "{{ COM_ICE_GRIB }}/{{ grid }}/{{ RUN }}.ice.t{{ current_cycle | strftime('%H') }}z.{{ grid }}.f{{ '%03d' % forecast_hour }}.grib2"] + - ["{{ DATA }}/ice.{{ grid }}.grib2.idx", "{{ COM_ICE_GRIB }}/{{ grid }}/{{ RUN }}.ice.t{{ current_cycle | strftime('%H') }}z.{{ grid }}.f{{ '%03d' % forecast_hour }}.grib2.idx"] + {% endfor %} diff --git a/scripts/exglobal_archive.sh b/scripts/exglobal_archive.sh index 2f7e3be972..833b06bd98 100755 --- a/scripts/exglobal_archive.sh +++ b/scripts/exglobal_archive.sh @@ -182,12 +182,12 @@ if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then targrp_list="${targrp_list} gfswave" fi - if [ "${DO_OCN}" = "YES" ]; then - targrp_list="${targrp_list} ocn_ice_grib2_0p5 ocn_ice_grib2_0p25 ocn_2D ocn_3D ocn_xsect ocn_daily gfs_flux_1p00" + if [[ "${DO_OCN}" == "YES" ]]; then + targrp_list="${targrp_list} ocean_6hravg ocean_daily ocean_grib2 gfs_flux_1p00" fi - if [ "${DO_ICE}" = "YES" ]; then - targrp_list="${targrp_list} ice" + if [[ "${DO_ICE}" == "YES" ]]; then + targrp_list="${targrp_list} ice_6hravg ice_grib2" fi # 
Aerosols diff --git a/scripts/exglobal_forecast.sh b/scripts/exglobal_forecast.sh index eebc9e59c3..c07cde3004 100755 --- a/scripts/exglobal_forecast.sh +++ b/scripts/exglobal_forecast.sh @@ -105,9 +105,11 @@ common_predet echo "MAIN: Loading variables before determination of run type" FV3_predet +[[ ${cplflx} = .true. ]] && CMEPS_predet [[ ${cplflx} = .true. ]] && MOM6_predet [[ ${cplwav} = .true. ]] && WW3_predet [[ ${cplice} = .true. ]] && CICE_predet +[[ ${cplchm} = .true. ]] && GOCART_predet echo "MAIN: Variables before determination of run type loaded" echo "MAIN: Determining run type" @@ -119,6 +121,7 @@ echo "MAIN: RUN Type Determined" echo "MAIN: Post-determination set up of run type" FV3_postdet +[[ ${cplflx} = .true. ]] && CMEPS_postdet [[ ${cplflx} = .true. ]] && MOM6_postdet [[ ${cplwav} = .true. ]] && WW3_postdet [[ ${cplice} = .true. ]] && CICE_postdet @@ -154,6 +157,7 @@ ${ERRSCRIPT} || exit "${err}" FV3_out [[ ${cplflx} = .true. ]] && MOM6_out +[[ ${cplflx} = .true. ]] && CMEPS_out [[ ${cplwav} = .true. ]] && WW3_out [[ ${cplice} = .true. ]] && CICE_out [[ ${cplchm} = .true. ]] && GOCART_out diff --git a/scripts/exglobal_oceanice_products.py b/scripts/exglobal_oceanice_products.py new file mode 100755 index 0000000000..0f8e2e0d6d --- /dev/null +++ b/scripts/exglobal_oceanice_products.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 + +import os + +from wxflow import AttrDict, Logger, logit, cast_strdict_as_dtypedict +from pygfs.task.oceanice_products import OceanIceProducts + +# initialize root logger +logger = Logger(level=os.environ.get("LOGGING_LEVEL", "DEBUG"), colored_log=True) + + +@logit(logger) +def main(): + + config = cast_strdict_as_dtypedict(os.environ) + + # Instantiate the OceanIce object + oceanice = OceanIceProducts(config) + + # Pull out all the configuration keys needed to run the rest of steps + keys = ['HOMEgfs', 'DATA', 'current_cycle', 'RUN', 'NET', + 'COM_OCEAN_HISTORY', 'COM_OCEAN_GRIB', + 'COM_ICE_HISTORY', 'COM_ICE_GRIB', + 'APRUN_OCNICEPOST', + 'component', 'forecast_hour', 'valid_datetime', 'avg_period', + 'model_grid', 'product_grids', 'oceanice_yaml'] + oceanice_dict = AttrDict() + for key in keys: + oceanice_dict[key] = oceanice.task_config[key] + + # Initialize the DATA/ directory; copy static data + oceanice.initialize(oceanice_dict) + + for grid in oceanice_dict.product_grids: + + logger.info(f"Processing {grid} grid") + + # Configure DATA/ directory for execution; prepare namelist etc. + oceanice.configure(oceanice_dict, grid) + + # Run the oceanice post executable to interpolate and create grib2 files + oceanice.execute(oceanice_dict, grid) + + # Subset raw model data to create netCDF products + oceanice.subset(oceanice_dict) + + # Copy processed output from execute and subset + oceanice.finalize(oceanice_dict) + + +if __name__ == '__main__': + main() diff --git a/scripts/run_reg2grb2.sh b/scripts/run_reg2grb2.sh deleted file mode 100755 index ab2c80043e..0000000000 --- a/scripts/run_reg2grb2.sh +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" - -#requires grib_util module - -MOM6REGRID=${MOM6REGRID:-${HOMEgfs}} -export mask_file="${MOM6REGRID}/fix/reg2grb2/mask.0p25x0p25.grb2" - -# offline testing: -#export DATA= -#export icefile=$DATA/DATA0p5/icer2012010106.01.2012010100_0p5x0p5.nc -#export ocnfile=$DATA/DATA0p5/ocnr2012010106.01.2012010100_0p5x0p5.nc -#export outfile=$DATA/DATA0p5/out/ocnh2012010106.01.2012010100.grb2 -# -# workflow testing: -export icefile="icer${VDATE}.${ENSMEM}.${IDATE}_0p25x0p25_CICE.nc" -export ocnfile="ocnr${VDATE}.${ENSMEM}.${IDATE}_0p25x0p25_MOM6.nc" -export outfile="ocn_ice${VDATE}.${ENSMEM}.${IDATE}_0p25x0p25.grb2" -export outfile0p5="ocn_ice${VDATE}.${ENSMEM}.${IDATE}_0p5x0p5.grb2" - -export mfcstcpl=${mfcstcpl:-1} -export IGEN_OCNP=${IGEN_OCNP:-197} - -# PT This is the forecast date -export year=${VDATE:0:4} -export month=${VDATE:4:2} -export day=${VDATE:6:2} -export hour=${VDATE:8:2} - -# PT This is the initialization date -export syear=${IDATE:0:4} -export smonth=${IDATE:4:2} -export sday=${IDATE:6:2} -export shour=${IDATE:8:2} - -# PT Need to get this from above - could be 6 or 1 hour -export hh_inc_ocn=6 -# -# set for 1p0 lat-lon -#export im=360 -#export jm=181 -# export km=40 -#export imo=360 -#export jmo=181 -# -# set for 0p5 lat-lon -#export im=720 -#export jm=361 -#export km=40 -#export imo=720 -#export jmo=361 -# -# set for 0p25 lat-lon -export im=1440 -export jm=721 -export imo=1440 -export jmo=721 -export km=40 - -export flats=-90. -export flatn=90. -export flonw=0.0 -export flone=359.75 - -ln -sf "${mask_file}" ./iceocnpost.g2 -${executable} > "reg2grb2.${VDATE}.${IDATE}.out" - -# interpolated from 0p25 to 0p5 grid -grid2p05="0 6 0 0 0 0 0 0 720 361 0 0 90000000 0 48 -90000000 359500000 500000 500000 0" -${COPYGB2} -g "${grid2p05}" -i0 -x "${outfile}" "${outfile0p5}" - diff --git a/scripts/run_regrid.sh b/scripts/run_regrid.sh deleted file mode 100755 index 103e9a759e..0000000000 --- a/scripts/run_regrid.sh +++ /dev/null @@ -1,27 +0,0 @@ -#! 
/usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" - -MOM6REGRID="${MOM6REGRID:-${HOMEgfs}}" -export EXEC_DIR="${MOM6REGRID}/exec" -export USH_DIR="${MOM6REGRID}/ush" -export COMOUTocean="${COM_OCEAN_HISTORY}" -export COMOUTice="${COM_ICE_HISTORY}" -export IDATE="${IDATE}" -export VDATE="${VDATE}" -export ENSMEM="${ENSMEM}" -export FHR="${fhr}" -export DATA="${DATA}" -export FIXreg2grb2="${FIXreg2grb2}" - -###### DO NOT MODIFY BELOW UNLESS YOU KNOW WHAT YOU ARE DOING ####### -#Need NCL module to be loaded: -echo "${NCARG_ROOT}" -export NCL="${NCARG_ROOT}/bin/ncl" - -ls -alrt - -${NCL} "${USH_DIR}/icepost.ncl" -${NCL} "${USH_DIR}/ocnpost.ncl" -##################################################################### - diff --git a/sorc/gfs_utils.fd b/sorc/gfs_utils.fd index 7d3b08e87c..6ddd1460d9 160000 --- a/sorc/gfs_utils.fd +++ b/sorc/gfs_utils.fd @@ -1 +1 @@ -Subproject commit 7d3b08e87c07cfa54079442d245ac7e9ab1cd9f4 +Subproject commit 6ddd1460d9f7c292f04573ab2bdc988a05ed618b diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 6d5d40a354..39e2a7785f 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -107,7 +107,6 @@ for dir in aer \ lut \ mom6 \ orog \ - reg2grb2 \ sfc_climo \ ugwd \ verif \ @@ -135,16 +134,20 @@ for file in postxconfig-NT-GEFS-F00.txt postxconfig-NT-GEFS.txt postxconfig-NT-G postxconfig-NT-GFS-ANL.txt postxconfig-NT-GFS-F00.txt postxconfig-NT-GFS-FLUX-F00.txt \ postxconfig-NT-GFS.txt postxconfig-NT-GFS-FLUX.txt postxconfig-NT-GFS-GOES.txt \ postxconfig-NT-GFS-F00-TWO.txt postxconfig-NT-GFS-TWO.txt \ - params_grib2_tbl_new post_tag_gfs128 post_tag_gfs65 nam_micro_lookup.dat + params_grib2_tbl_new post_tag_gfs128 post_tag_gfs65 nam_micro_lookup.dat do ${LINK_OR_COPY} "${HOMEgfs}/sorc/upp.fd/parm/${file}" . done for file in optics_luts_DUST.dat optics_luts_DUST_nasa.dat optics_luts_NITR_nasa.dat \ optics_luts_SALT.dat optics_luts_SALT_nasa.dat optics_luts_SOOT.dat optics_luts_SOOT_nasa.dat \ - optics_luts_SUSO.dat optics_luts_SUSO_nasa.dat optics_luts_WASO.dat optics_luts_WASO_nasa.dat + optics_luts_SUSO.dat optics_luts_SUSO_nasa.dat optics_luts_WASO.dat optics_luts_WASO_nasa.dat do ${LINK_OR_COPY} "${HOMEgfs}/sorc/upp.fd/fix/chem/${file}" . done +for file in ice.csv ocean.csv ocnicepost.nml.jinja2 +do + ${LINK_OR_COPY} "${HOMEgfs}/sorc/gfs_utils.fd/parm/ocnicepost/${file}" . +done cd "${HOMEgfs}/scripts" || exit 8 ${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_utils.fd/scripts/exemcsfc_global_sfc_prep.sh" . @@ -243,7 +246,7 @@ cd "${HOMEgfs}/exec" || exit 1 for utilexe in fbwndgfs.x gaussian_sfcanl.x gfs_bufr.x supvit.x syndat_getjtbul.x \ syndat_maksynrc.x syndat_qctropcy.x tocsbufr.x overgridid.x \ - mkgfsawps.x enkf_chgres_recenter_nc.x tave.x vint.x reg2grb2.x + mkgfsawps.x enkf_chgres_recenter_nc.x tave.x vint.x ocnicepost.x do [[ -s "${utilexe}" ]] && rm -f "${utilexe}" ${LINK_OR_COPY} "${HOMEgfs}/sorc/gfs_utils.fd/install/bin/${utilexe}" . @@ -397,7 +400,6 @@ for prog in enkf_chgres_recenter_nc.fd \ mkgfsawps.fd \ overgridid.fd \ rdbfmsua.fd \ - reg2grb2.fd \ supvit.fd \ syndat_getjtbul.fd \ syndat_maksynrc.fd \ @@ -405,7 +407,8 @@ for prog in enkf_chgres_recenter_nc.fd \ tave.fd \ tocsbufr.fd \ vint.fd \ - webtitle.fd + webtitle.fd \ + ocnicepost.fd do if [[ -d "${prog}" ]]; then rm -rf "${prog}"; fi ${LINK_OR_COPY} "gfs_utils.fd/src/${prog}" . 
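For orientation, the native-grid netCDF products referenced in the PR description (and invoked as `oceanice.subset()` in `exglobal_oceanice_products.py` above) come from reducing the raw history file to the product variable list in `parm/post/oceanice_products.yaml`. A minimal sketch of that idea with `xarray`, using an abbreviated ocean variable list and illustrative file names; the actual implementation lives in `ush/python/pygfs/task/oceanice_products.py`:

    import xarray as xr

    # Abbreviated ocean subset; the full list comes from the `subset:` key in
    # parm/post/oceanice_products.yaml. File names here are illustrative.
    subset = ["SSH", "SST", "SSS", "speed", "MLD_003", "temp", "so", "uo", "vo"]

    ds = xr.open_dataset("ocean.nc")         # raw model history staged in DATA/
    ds[subset].to_netcdf("ocean_subset.nc")  # native-grid netCDF product

The grib2 products, by contrast, go through the `ocnicepost.x` interpolation executable and the `ush/oceanice_nc2grib2.sh` conversion script named in the yaml.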
diff --git a/sorc/ncl.setup b/sorc/ncl.setup deleted file mode 100644 index b4981689db..0000000000 --- a/sorc/ncl.setup +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set +x -case ${target} in - 'jet'|'hera') - module load ncl/6.5.0 - export NCARG_LIB=${NCARG_ROOT}/lib - ;; - *) - echo "[${BASH_SOURCE[0]}]: unknown ${target}" - ;; -esac diff --git a/ush/forecast_det.sh b/ush/forecast_det.sh index e1a2a49a7e..198df6505f 100755 --- a/ush/forecast_det.sh +++ b/ush/forecast_det.sh @@ -8,7 +8,7 @@ ## This script is a definition of functions. ##### -# For all non-evironment variables +# For all non-environment variables # Cycling and forecast hour specific parameters FV3_det(){ diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh index 8e40d6c881..052e549251 100755 --- a/ush/forecast_postdet.sh +++ b/ush/forecast_postdet.sh @@ -266,10 +266,10 @@ EOF # inline post fix files if [[ ${WRITE_DOPOST} = ".true." ]]; then - ${NLN} "${PARM_POST}/post_tag_gfs${LEVS}" "${DATA}/itag" - ${NLN} "${FLTFILEGFS:-${PARM_POST}/postxconfig-NT-GFS-TWO.txt}" "${DATA}/postxconfig-NT.txt" - ${NLN} "${FLTFILEGFSF00:-${PARM_POST}/postxconfig-NT-GFS-F00-TWO.txt}" "${DATA}/postxconfig-NT_FH00.txt" - ${NLN} "${POSTGRB2TBL:-${PARM_POST}/params_grib2_tbl_new}" "${DATA}/params_grib2_tbl_new" + ${NLN} "${PARMgfs}/post/post_tag_gfs${LEVS}" "${DATA}/itag" + ${NLN} "${FLTFILEGFS:-${PARMgfs}/post/postxconfig-NT-GFS-TWO.txt}" "${DATA}/postxconfig-NT.txt" + ${NLN} "${FLTFILEGFSF00:-${PARMgfs}/post/postxconfig-NT-GFS-F00-TWO.txt}" "${DATA}/postxconfig-NT_FH00.txt" + ${NLN} "${POSTGRB2TBL:-${PARMgfs}/post/params_grib2_tbl_new}" "${DATA}/params_grib2_tbl_new" fi #------------------------------------------------------------------ @@ -463,8 +463,6 @@ EOF LONB_STP=${LONB_STP:-${LONB_CASE}} LATB_STP=${LATB_STP:-${LATB_CASE}} cd "${DATA}" || exit 1 - if [[ ! -d ${COM_ATMOS_HISTORY} ]]; then mkdir -p "${COM_ATMOS_HISTORY}"; fi - if [[ ! -d ${COM_ATMOS_MASTER} ]]; then mkdir -p "${COM_ATMOS_MASTER}"; fi if [[ "${QUILTING}" = ".true." ]] && [[ "${OUTPUT_GRID}" = "gaussian_grid" ]]; then for fhr in ${FV3_OUTPUT_FH}; do local FH3=$(printf %03i "${fhr}") @@ -503,7 +501,6 @@ FV3_out() { # Copy FV3 restart files if [[ ${RUN} =~ "gdas" ]]; then cd "${DATA}/RESTART" - mkdir -p "${COM_ATMOS_RESTART}" local idate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${restart_interval} hours" +%Y%m%d%H) while [[ ${idate} -le ${forecast_end_cycle} ]]; do for file in "${idate:0:8}.${idate:8:2}0000."*; do @@ -604,8 +601,6 @@ WW3_postdet() { ${NLN} "${wavcurfile}" "${DATA}/current.${WAVECUR_FID}" fi - if [[ ! -d ${COM_WAVE_HISTORY} ]]; then mkdir -p "${COM_WAVE_HISTORY}"; fi - # Link output files cd "${DATA}" if [[ ${waveMULTIGRID} = ".true." 
]]; then @@ -682,6 +677,7 @@ MOM6_postdet() { ${NLN} "${COM_OCEAN_RESTART_PREV}/${sPDY}.${scyc}0000.MOM.res.nc" "${DATA}/INPUT/MOM.res.nc" case ${OCNRES} in "025") + local nn for nn in $(seq 1 4); do if [[ -f "${COM_OCEAN_RESTART_PREV}/${sPDY}.${scyc}0000.MOM.res_${nn}.nc" ]]; then ${NLN} "${COM_OCEAN_RESTART_PREV}/${sPDY}.${scyc}0000.MOM.res_${nn}.nc" "${DATA}/INPUT/MOM.res_${nn}.nc" @@ -700,7 +696,7 @@ MOM6_postdet() { fi # Copy MOM6 fixed files - ${NCP} "${FIXmom}/${OCNRES}/"* "${DATA}/INPUT/" + ${NCP} "${FIXmom}/${OCNRES}/"* "${DATA}/INPUT/" # TODO: These need to be explicit # Copy coupled grid_spec spec_file="${FIXcpl}/a${CASE}o${OCNRES}/grid_spec.nc" @@ -711,27 +707,6 @@ MOM6_postdet() { exit 3 fi - # Copy mediator restart files to RUNDIR # TODO: mediator should have its own CMEPS_postdet() function - if [[ ${warm_start} = ".true." ]]; then - local mediator_file="${COM_MED_RESTART}/${PDY}.${cyc}0000.ufs.cpld.cpl.r.nc" - if [[ -f "${mediator_file}" ]]; then - ${NCP} "${mediator_file}" "${DATA}/ufs.cpld.cpl.r.nc" - rm -f "${DATA}/rpointer.cpl" - touch "${DATA}/rpointer.cpl" - echo "ufs.cpld.cpl.r.nc" >> "${DATA}/rpointer.cpl" - else - # We have a choice to make here. - # Either we can FATAL ERROR out, or we can let the coupling fields initialize from zero - # cmeps_run_type is determined based on the availability of the mediator restart file - echo "WARNING: ${mediator_file} does not exist for warm_start = .true., initializing!" - #echo "FATAL ERROR: ${mediator_file} must exist for warm_start = .true. and does not, ABORT!" - #exit 4 - fi - else - # This is a cold start, so initialize the coupling fields from zero - export cmeps_run_type="startup" - fi - # If using stochatic parameterizations, create a seed that does not exceed the # largest signed integer if [[ "${DO_OCN_SPPT}" = "YES" ]] || [[ "${DO_OCN_PERT_EPBL}" = "YES" ]]; then @@ -743,58 +718,53 @@ MOM6_postdet() { fi fi - # Create COMOUTocean - [[ ! -d ${COM_OCEAN_HISTORY} ]] && mkdir -p "${COM_OCEAN_HISTORY}" - # Link output files if [[ "${RUN}" =~ "gfs" || "${RUN}" =~ "gefs" ]]; then - # Link output files for RUN = gfs - - # TODO: get requirements on what files need to be written out and what these dates here are and what they mean + # Link output files for RUN = gfs|gefs - if [[ ! 
-d ${COM_OCEAN_HISTORY} ]]; then mkdir -p "${COM_OCEAN_HISTORY}"; fi + # Looping over MOM6 output hours + local fhr fhr3 last_fhr interval midpoint vdate vdate_mid source_file dest_file + for fhr in ${MOM6_OUTPUT_FH}; do + fhr3=$(printf %03i "${fhr}") - # Looping over FV3 output hours - # TODO: Need to define MOM6_OUTPUT_FH and control at some point for issue #1629 - for fhr in ${FV3_OUTPUT_FH}; do if [[ -z ${last_fhr:-} ]]; then - local last_fhr=${fhr} + last_fhr=${fhr} continue fi + (( interval = fhr - last_fhr )) (( midpoint = last_fhr + interval/2 )) - local vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) - local vdate_mid=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${midpoint} hours" +%Y%m%d%H) - + vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) + vdate_mid=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${midpoint} hours" +%Y%m%d%H) # Native model output uses window midpoint in the filename, but we are mapping that to the end of the period for COM - local source_file="ocn_${vdate_mid:0:4}_${vdate_mid:4:2}_${vdate_mid:6:2}_${vdate_mid:8:2}.nc" - local dest_file="ocn${vdate}.${ENSMEM}.${current_cycle}.nc" + source_file="ocn_${vdate_mid:0:4}_${vdate_mid:4:2}_${vdate_mid:6:2}_${vdate_mid:8:2}.nc" + dest_file="${RUN}.ocean.t${cyc}z.${interval}hr_avg.f${fhr3}.nc" ${NLN} "${COM_OCEAN_HISTORY}/${dest_file}" "${DATA}/${source_file}" - local source_file="ocn_daily_${vdate:0:4}_${vdate:4:2}_${vdate:6:2}.nc" - local dest_file=${source_file} - if [[ ! -a "${DATA}/${source_file}" ]]; then + # Daily output + if (( fhr > 0 & fhr % 24 == 0 )); then + source_file="ocn_daily_${vdate:0:4}_${vdate:4:2}_${vdate:6:2}.nc" + dest_file="${RUN}.ocean.t${cyc}z.daily.f${fhr3}.nc" ${NLN} "${COM_OCEAN_HISTORY}/${dest_file}" "${DATA}/${source_file}" fi - local last_fhr=${fhr} + last_fhr=${fhr} + done elif [[ "${RUN}" =~ "gdas" ]]; then # Link output files for RUN = gdas - # Save MOM6 backgrounds - for fhr in ${FV3_OUTPUT_FH}; do - local idatestr=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y_%m_%d_%H) + # Save (instantaneous) MOM6 backgrounds + for fhr in ${MOM6_OUTPUT_FH}; do local fhr3=$(printf %03i "${fhr}") - ${NLN} "${COM_OCEAN_HISTORY}/${RUN}.t${cyc}z.ocnf${fhr3}.nc" "${DATA}/ocn_da_${idatestr}.nc" + local vdatestr=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y_%m_%d_%H) + ${NLN} "${COM_OCEAN_HISTORY}/${RUN}.ocean.t${cyc}z.inst.f${fhr3}.nc" "${DATA}/ocn_da_${vdatestr}.nc" done fi - mkdir -p "${COM_OCEAN_RESTART}" - # Link ocean restarts from DATA to COM # Coarser than 1/2 degree has a single MOM restart ${NLN} "${COM_OCEAN_RESTART}/${forecast_end_cycle:0:8}.${forecast_end_cycle:8:2}0000.MOM.res.nc" "${DATA}/MOM6_RESTART/" @@ -809,10 +779,16 @@ MOM6_postdet() { ;; esac - # Loop over restart_interval frequency and link restarts from DATA to COM - local idate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${restart_interval} hours" +%Y%m%d%H) - while [[ ${idate} -lt ${forecast_end_cycle} ]]; do - local idatestr=$(date +%Y-%m-%d-%H -d "${idate:0:8} ${idate:8:2}") + if [[ "${RUN}" =~ "gdas" ]]; then + local interval idate + if [[ "${DOIAU}" = "YES" ]]; then + # Link restarts at the beginning of the next cycle from DATA to COM + interval=$(( assim_freq / 2 )) + idate=$(date --utc -d "${next_cycle:0:8} ${next_cycle:8:2} - ${interval} hours" +%Y%m%d%H) + else + # Link restarts at the middle of the next cycle from DATA to COM 
+ idate="${next_cycle}" + fi ${NLN} "${COM_OCEAN_RESTART}/${idate:0:8}.${idate:8:2}0000.MOM.res.nc" "${DATA}/MOM6_RESTART/" case ${OCNRES} in "025") @@ -821,23 +797,7 @@ MOM6_postdet() { done ;; esac - local idate=$(date --utc -d "${idate:0:8} ${idate:8:2} + ${restart_interval} hours" +%Y%m%d%H) - done - - # TODO: mediator should have its own CMEPS_postdet() function - # Link mediator restarts from DATA to COM - # DANGER DANGER DANGER - Linking mediator restarts to COM causes the model to fail with a message like this below: - # Abort with message NetCDF: File exists && NC_NOCLOBBER in file pio-2.5.7/src/clib/pioc_support.c at line 2173 - # Instead of linking, copy the mediator files after the model finishes - #local COMOUTmed="${ROTDIR}/${RUN}.${PDY}/${cyc}/med" - #mkdir -p "${COMOUTmed}/RESTART" - #local idate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${restart_interval} hours" +%Y%m%d%H) - #while [[ ${idate} -le ${forecast_end_cycle} ]]; do - # local seconds=$(to_seconds ${idate:8:2}0000) # use function to_seconds from forecast_predet.sh to convert HHMMSS to seconds - # local idatestr="${idate:0:4}-${idate:4:2}-${idate:6:2}-${seconds}" - # ${NLN} "${COMOUTmed}/RESTART/${idate:0:8}.${idate:8:2}0000.ufs.cpld.cpl.r.nc" "${DATA}/RESTART/ufs.cpld.cpl.r.${idatestr}.nc" - # local idate=$(date --utc -d "${idate:0:8} ${idate:8:2} + ${restart_interval} hours" +%Y%m%d%H) - #done + fi echo "SUB ${FUNCNAME[0]}: MOM6 input data linked/copied" @@ -853,26 +813,8 @@ MOM6_out() { echo "SUB ${FUNCNAME[0]}: Copying output data for MOM6" # Copy MOM_input from DATA to COM_OCEAN_INPUT after the forecast is run (and successfull) - if [[ ! -d ${COM_OCEAN_INPUT} ]]; then mkdir -p "${COM_OCEAN_INPUT}"; fi ${NCP} "${DATA}/INPUT/MOM_input" "${COM_CONF}/ufs.MOM_input" - # TODO: mediator should have its own CMEPS_out() function - # Copy mediator restarts from DATA to COM - # Linking mediator restarts to COM causes the model to fail with a message. - # See MOM6_postdet() function for error message - mkdir -p "${COM_MED_RESTART}" - local idate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${restart_interval} hours" +%Y%m%d%H) - while [[ ${idate} -le ${forecast_end_cycle} ]]; do - local seconds=$(to_seconds "${idate:8:2}"0000) # use function to_seconds from forecast_predet.sh to convert HHMMSS to seconds - local idatestr="${idate:0:4}-${idate:4:2}-${idate:6:2}-${seconds}" - local mediator_file="${DATA}/RESTART/ufs.cpld.cpl.r.${idatestr}.nc" - if [[ -f ${mediator_file} ]]; then - ${NCP} "${DATA}/RESTART/ufs.cpld.cpl.r.${idatestr}.nc" "${COM_MED_RESTART}/${idate:0:8}.${idate:8:2}0000.ufs.cpld.cpl.r.nc" - else - echo "Mediator restart ${mediator_file} not found." - fi - local idate=$(date --utc -d "${idate:0:8} ${idate:8:2} + ${restart_interval} hours" +%Y%m%d%H) - done } CICE_postdet() { @@ -895,54 +837,40 @@ CICE_postdet() { ${NLN} "${FIXcice}/${ICERES}/${CICE_MASK}" "${DATA}/" ${NLN} "${FIXcice}/${ICERES}/${MESH_ICE}" "${DATA}/" - # Link CICE output files - if [[ ! -d "${COM_ICE_HISTORY}" ]]; then mkdir -p "${COM_ICE_HISTORY}"; fi - mkdir -p "${COM_ICE_RESTART}" + # Link iceh_ic file to COM. This is the initial condition file from CICE (f000) + # TODO: Is this file needed in COM? Is this going to be used for generating any products? 
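+  # Worked example of the name mapping in this function (illustrative values,
+  # assuming RUN=gfs, cyc=00, current_cycle=2013100100 and 6-hourly output):
+  # for fhr=6 the averaging window ends at vdate=2013100106, i.e. 21600
+  # seconds past 00Z, so DATA/CICE_OUTPUT/iceh_06h.2013-10-01-21600.nc is
+  # linked to ${COM_ICE_HISTORY}/gfs.ice.t00z.6hr_avg.f006.nc.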
+ local vdate seconds vdatestr fhr fhr3 interval last_fhr + seconds=$(to_seconds "${current_cycle:8:2}0000") # convert HHMMSS to seconds + vdatestr="${current_cycle:0:4}-${current_cycle:4:2}-${current_cycle:6:2}-${seconds}" + ${NLN} "${COM_ICE_HISTORY}/${RUN}.ice.t${cyc}z.ic.nc" "${DATA}/CICE_OUTPUT/iceh_ic.${vdatestr}.nc" - if [[ "${RUN}" =~ "gfs" || "${RUN}" =~ "gefs" ]]; then - # Link output files for RUN = gfs - - # TODO: make these forecast output files consistent w/ GFS output - # TODO: Work w/ NB to determine appropriate naming convention for these files - - # TODO: consult w/ NB on how to improve on this. Gather requirements and more information on what these files are and how they are used to properly catalog them - local vdate seconds vdatestr fhr last_fhr - for fhr in ${FV3_OUTPUT_FH}; do - vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) - seconds=$(to_seconds "${vdate:8:2}0000") # convert HHMMSS to seconds - vdatestr="${vdate:0:4}-${vdate:4:2}-${vdate:6:2}-${seconds}" + # Link CICE forecast output files from DATA/CICE_OUTPUT to COM + local source_file dest_file + for fhr in ${CICE_OUTPUT_FH}; do + fhr3=$(printf %03i "${fhr}") - if [[ 10#${fhr} -eq 0 ]]; then - ${NLN} "${COM_ICE_HISTORY}/iceic${vdate}.${ENSMEM}.${current_cycle}.nc" "${DATA}/CICE_OUTPUT/iceh_ic.${vdatestr}.nc" - else - (( interval = fhr - last_fhr )) # Umm.. isn't this CICE_HISTFREQ_N in hours (currently set to FHOUT)? - ${NLN} "${COM_ICE_HISTORY}/ice${vdate}.${ENSMEM}.${current_cycle}.nc" "${DATA}/CICE_OUTPUT/iceh_$(printf "%0.2d" "${interval}")h.${vdatestr}.nc" - fi + if [[ -z ${last_fhr:-} ]]; then last_fhr=${fhr} - done + continue + fi - elif [[ "${RUN}" =~ "gdas" ]]; then + (( interval = fhr - last_fhr )) - # Link CICE generated initial condition file from DATA/CICE_OUTPUT to COMOUTice - # This can be thought of as the f000 output from the CICE model - local seconds vdatestr - seconds=$(to_seconds "${current_cycle:8:2}0000") # convert HHMMSS to seconds - vdatestr="${current_cycle:0:4}-${current_cycle:4:2}-${current_cycle:6:2}-${seconds}" - ${NLN} "${COM_ICE_HISTORY}/${RUN}.t${cyc}z.iceic.nc" "${DATA}/CICE_OUTPUT/iceh_ic.${vdatestr}.nc" - - # Link instantaneous CICE forecast output files from DATA/CICE_OUTPUT to COMOUTice - local vdate vdatestr seconds fhr fhr3 - fhr="${FHOUT}" - while [[ "${fhr}" -le "${FHMAX}" ]]; do - vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) - seconds=$(to_seconds "${vdate:8:2}0000") # convert HHMMSS to seconds - vdatestr="${vdate:0:4}-${vdate:4:2}-${vdate:6:2}-${seconds}" - fhr3=$(printf %03i "${fhr}") - ${NLN} "${COM_ICE_HISTORY}/${RUN}.t${cyc}z.icef${fhr3}.nc" "${DATA}/CICE_OUTPUT/iceh_inst.${vdatestr}.nc" - fhr=$((fhr + FHOUT)) - done + vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) + seconds=$(to_seconds "${vdate:8:2}0000") # convert HHMMSS to seconds + vdatestr="${vdate:0:4}-${vdate:4:2}-${vdate:6:2}-${seconds}" - fi + if [[ "${RUN}" =~ "gfs" || "${RUN}" =~ "gefs" ]]; then + source_file="iceh_$(printf "%0.2d" "${interval}")h.${vdatestr}.nc" + dest_file="${RUN}.ice.t${cyc}z.${interval}hr_avg.f${fhr3}.nc" + elif [[ "${RUN}" =~ "gdas" ]]; then + source_file="iceh_inst.${vdatestr}.nc" + dest_file="${RUN}.ice.t${cyc}z.inst.f${fhr3}.nc" + fi + ${NLN} "${COM_ICE_HISTORY}/${dest_file}" "${DATA}/CICE_OUTPUT/${source_file}" + + last_fhr=${fhr} + done # Link CICE restarts from CICE_RESTART to COMOUTice/RESTART # Loop over restart_interval and link restarts 
from DATA to COM @@ -966,7 +894,6 @@ CICE_out() { echo "SUB ${FUNCNAME[0]}: Copying output data for CICE" # Copy ice_in namelist from DATA to COMOUTice after the forecast is run (and successfull) - if [[ ! -d "${COM_ICE_INPUT}" ]]; then mkdir -p "${COM_ICE_INPUT}"; fi ${NCP} "${DATA}/ice_in" "${COM_CONF}/ufs.ice_in" } @@ -1004,8 +931,6 @@ GOCART_rc() { GOCART_postdet() { echo "SUB ${FUNCNAME[0]}: Linking output data for GOCART" - if [[ ! -d "${COM_CHEM_HISTORY}" ]]; then mkdir -p "${COM_CHEM_HISTORY}"; fi - for fhr in ${FV3_OUTPUT_FH}; do local vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d%H) @@ -1033,6 +958,56 @@ GOCART_out() { ${NCP} "${DATA}/gocart.inst_aod.${vdate:0:8}_${vdate:8:2}00z.nc4" \ "${COM_CHEM_HISTORY}/gocart.inst_aod.${vdate:0:8}_${vdate:8:2}00z.nc4" done +} + +CMEPS_postdet() { + echo "SUB ${FUNCNAME[0]}: Linking output data for CMEPS mediator" + + # Copy mediator restart files to RUNDIR + if [[ "${warm_start}" = ".true." ]]; then + local mediator_file="${COM_MED_RESTART}/${PDY}.${cyc}0000.ufs.cpld.cpl.r.nc" + if [[ -f "${mediator_file}" ]]; then + ${NCP} "${mediator_file}" "${DATA}/ufs.cpld.cpl.r.nc" + rm -f "${DATA}/rpointer.cpl" + touch "${DATA}/rpointer.cpl" + echo "ufs.cpld.cpl.r.nc" >> "${DATA}/rpointer.cpl" + else + # We have a choice to make here. + # Either we can FATAL ERROR out, or we can let the coupling fields initialize from zero + # cmeps_run_type is determined based on the availability of the mediator restart file + echo "WARNING: ${mediator_file} does not exist for warm_start = .true., initializing!" + #echo "FATAL ERROR: ${mediator_file} must exist for warm_start = .true. and does not, ABORT!" + #exit 4 + fi + fi + # Link mediator restarts from DATA to COM + # DANGER DANGER DANGER - Linking mediator restarts to COM causes the model to fail with a message like this below: + # Abort with message NetCDF: File exists && NC_NOCLOBBER in file pio-2.5.7/src/clib/pioc_support.c at line 2173 + # Instead of linking, copy the mediator files after the model finishes. See CMEPS_out() below. + #local rdate rdatestr seconds mediator_file + #rdate=${forecast_end_cycle} + #seconds=$(to_seconds "${rdate:8:2}"0000) # use function to_seconds from forecast_predet.sh to convert HHMMSS to seconds + #rdatestr="${rdate:0:4}-${rdate:4:2}-${rdate:6:2}-${seconds}" + #${NLN} "${COM_MED_RESTART}/${rdate:0:8}.${rdate:8:2}0000.ufs.cpld.cpl.r.nc" "${DATA}/CMEPS_RESTART/ufs.cpld.cpl.r.${rdatestr}.nc" + +} + +CMEPS_out() { + echo "SUB ${FUNCNAME[0]}: Copying output data for CMEPS mediator" + + # Linking mediator restarts to COM causes the model to fail with a message. + # Abort with message NetCDF: File exists && NC_NOCLOBBER in file pio-2.5.7/src/clib/pioc_support.c at line 2173 + # Copy mediator restarts from DATA to COM + local rdate rdatestr seconds mediator_file + rdate=${forecast_end_cycle} + seconds=$(to_seconds "${rdate:8:2}"0000) # use function to_seconds from forecast_predet.sh to convert HHMMSS to seconds + rdatestr="${rdate:0:4}-${rdate:4:2}-${rdate:6:2}-${seconds}" + mediator_file="${DATA}/CMEPS_RESTART/ufs.cpld.cpl.r.${rdatestr}.nc" + if [[ -f ${mediator_file} ]]; then + ${NCP} "${mediator_file}" "${COM_MED_RESTART}/${rdate:0:8}.${rdate:8:2}0000.ufs.cpld.cpl.r.nc" + else + echo "Mediator restart ${mediator_file} not found." 
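+    # Not a FATAL ERROR: CMEPS_out runs after the forecast has completed, so
+    # a missing mediator restart here only means nothing is copied to COM for
+    # the next cycle to warm-start from.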
+  fi
 }
diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh
index 8f46ed6ea0..1aaa1a4b9d 100755
--- a/ush/forecast_predet.sh
+++ b/ush/forecast_predet.sh
@@ -8,9 +8,6 @@
 ## This script is a definition of functions.
 #####
-# For all non-evironment variables
-# Cycling and forecast hour specific parameters
-
 to_seconds() {
   # Function to convert HHMMSS to seconds since 00Z
   local hhmmss=${1:?}
@@ -50,21 +47,12 @@ common_predet(){
   # shellcheck disable=SC2034
   pwd=$(pwd)
   CDUMP=${CDUMP:-gdas}
-  CASE=${CASE:-C768}
-  CDATE=${CDATE:-2017032500}
+  CASE=${CASE:-C96}
+  CDATE=${CDATE:-"${PDY}${cyc}"}
   ENSMEM=${ENSMEM:-000}
-  FCSTEXECDIR=${FCSTEXECDIR:-${HOMEgfs}/exec}
-  FCSTEXEC=${FCSTEXEC:-ufs_model.x}
-
-  # Directories.
-  FIXgfs=${FIXgfs:-${HOMEgfs}/fix}
-
-  # Model specific stuff
-  PARM_POST=${PARM_POST:-${HOMEgfs}/parm/post}
-
   # Define significant cycles
-  current_cycle=${CDATE}
+  current_cycle="${PDY}${cyc}"
   previous_cycle=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} - ${assim_freq} hours" +%Y%m%d%H)
   # ignore errors that variable isn't used
   # shellcheck disable=SC2034
@@ -88,23 +76,28 @@ common_predet(){
     tcyc=${scyc}
   fi
-  mkdir -p "${COM_CONF}"
+  FHMIN=${FHMIN:-0}
+  FHMAX=${FHMAX:-9}
+  FHOUT=${FHOUT:-3}
+  FHMAX_HF=${FHMAX_HF:-0}
+  FHOUT_HF=${FHOUT_HF:-1}
+
+  # Several model components share DATA/INPUT for input data
+  if [[ ! -d "${DATA}/INPUT" ]]; then mkdir -p "${DATA}/INPUT"; fi
+
+  if [[ ! -d "${COM_CONF}" ]]; then mkdir -p "${COM_CONF}"; fi
 
   cd "${DATA}" || ( echo "FATAL ERROR: Unable to 'cd ${DATA}', ABORT!"; exit 8 )
 }
 
 FV3_predet(){
   echo "SUB ${FUNCNAME[0]}: Defining variables for FV3"
-  FHMIN=${FHMIN:-0}
-  FHMAX=${FHMAX:-9}
-  FHOUT=${FHOUT:-3}
+
+  if [[ ! -d "${COM_ATMOS_HISTORY}" ]]; then mkdir -p "${COM_ATMOS_HISTORY}"; fi
+  if [[ ! -d "${COM_ATMOS_MASTER}" ]]; then mkdir -p "${COM_ATMOS_MASTER}"; fi
+  if [[ ! -d "${COM_ATMOS_RESTART}" ]]; then mkdir -p "${COM_ATMOS_RESTART}"; fi
+
   FHZER=${FHZER:-6}
   FHCYC=${FHCYC:-24}
-  FHMAX_HF=${FHMAX_HF:-0}
-  FHOUT_HF=${FHOUT_HF:-1}
-  NSOUT=${NSOUT:-"-1"}
-  FDIAG=${FHOUT}
-  if (( FHMAX_HF > 0 && FHOUT_HF > 0 )); then FDIAG=${FHOUT_HF}; fi
-
   WRITE_DOPOST=${WRITE_DOPOST:-".false."}
   restart_interval=${restart_interval:-${FHMAX}}
   # restart_interval = 0 implies write restart at the END of the forecast i.e. at FHMAX
   if [[ ${restart_interval} -eq 0 ]]; then
@@ -112,8 +105,8 @@ FV3_predet(){
   fi
   # Convert output settings into an explicit list for FV3
-  # NOTE: FV3_OUTPUT_FH is also currently used in other components
-  # TODO: Have a seperate control for other components to address issue #1629
+  # Ignore "not used" warning
+  # shellcheck disable=SC2034
   FV3_OUTPUT_FH=""
   local fhr=${FHMIN}
   if (( FHOUT_HF > 0 && FHMAX_HF > 0 )); then
   fi
   FV3_OUTPUT_FH="${FV3_OUTPUT_FH} $(seq -s ' ' "${fhr}" "${FHOUT}" "${FHMAX}")"
-  # Model resolution specific parameters
-  DELTIM=${DELTIM:-225}
-  layout_x=${layout_x:-8}
-  layout_y=${layout_y:-16}
-  LEVS=${LEVS:-65}
-
   # Other options
   # ignore errors that variable isn't used
   # shellcheck disable=SC2034
@@ -141,18 +128,8 @@ FV3_predet(){
   # Model config options
   ntiles=6
-  TYPE=${TYPE:-"nh"} # choices:  nh, hydro
-  MONO=${MONO:-"non-mono"} # choices:  mono, non-mono
-
-  QUILTING=${QUILTING:-".true."}
-  OUTPUT_GRID=${OUTPUT_GRID:-"gaussian_grid"}
-  WRITE_NEMSIOFLIP=${WRITE_NEMSIOFLIP:-".true."}
-  WRITE_FSYNCFLAG=${WRITE_FSYNCFLAG:-".true."}
-
   rCDUMP=${rCDUMP:-${CDUMP}}
-  mkdir -p "${DATA}/INPUT"
-
   #------------------------------------------------------------------
   # changeable parameters
   # dycore definitions
@@ -210,8 +187,7 @@ FV3_predet(){
   print_freq=${print_freq:-6}
 
   #-------------------------------------------------------
-  if [[ ${RUN} =~ "gfs" || ${RUN} = "gefs" ]]; then
-    if [[ ! -d ${COM_ATMOS_RESTART} ]]; then mkdir -p "${COM_ATMOS_RESTART}" ; fi
+  if [[ "${RUN}" =~ "gfs" || "${RUN}" = "gefs" ]]; then
     ${NLN} "${COM_ATMOS_RESTART}" RESTART
     # The final restart written at the end doesn't include the valid date
     # Create links that keep the same name pattern for these files
@@ -225,26 +201,68 @@ FV3_predet(){
     ${NLN} "${file}" "${COM_ATMOS_RESTART}/${forecast_end_cycle:0:8}.${forecast_end_cycle:8:2}0000.${file}"
     done
   else
-    mkdir -p "${DATA}/RESTART"
+    if [[ ! -d "${DATA}/RESTART" ]]; then mkdir -p "${DATA}/RESTART"; fi
   fi
-
   echo "SUB ${FUNCNAME[0]}: pre-determination variables set"
 }
 
 WW3_predet(){
   echo "SUB ${FUNCNAME[0]}: WW3 before run type determination"
+
+  if [[ ! -d "${COM_WAVE_HISTORY}" ]]; then mkdir -p "${COM_WAVE_HISTORY}"; fi
   if [[ ! -d "${COM_WAVE_RESTART}" ]]; then mkdir -p "${COM_WAVE_RESTART}" ; fi
+
   ${NLN} "${COM_WAVE_RESTART}" "restart_wave"
 }
 
 CICE_predet(){
   echo "SUB ${FUNCNAME[0]}: CICE before run type determination"
+
+  if [[ ! -d "${COM_ICE_HISTORY}" ]]; then mkdir -p "${COM_ICE_HISTORY}"; fi
+  if [[ ! -d "${COM_ICE_RESTART}" ]]; then mkdir -p "${COM_ICE_RESTART}"; fi
+  if [[ ! -d "${COM_ICE_INPUT}" ]]; then mkdir -p "${COM_ICE_INPUT}"; fi
+
+  if [[ ! -d "${DATA}/CICE_OUTPUT" ]]; then mkdir -p "${DATA}/CICE_OUTPUT"; fi
   if [[ ! -d "${DATA}/CICE_RESTART" ]]; then mkdir -p "${DATA}/CICE_RESTART"; fi
+
+  # CICE does not have a concept of high frequency output like FV3
+  # Convert output settings into an explicit list for CICE
+  # Ignore "not used" warning
+  # shellcheck disable=SC2034
+  CICE_OUTPUT_FH=$(seq -s ' ' "${FHMIN}" "${FHOUT}" "${FHMAX}")
+
 }
 
 MOM6_predet(){
   echo "SUB ${FUNCNAME[0]}: MOM6 before run type determination"
+
+  if [[ ! -d "${COM_OCEAN_HISTORY}" ]]; then mkdir -p "${COM_OCEAN_HISTORY}"; fi
+  if [[ ! -d "${COM_OCEAN_RESTART}" ]]; then mkdir -p "${COM_OCEAN_RESTART}"; fi
+  if [[ ! -d "${COM_OCEAN_INPUT}" ]]; then mkdir -p "${COM_OCEAN_INPUT}"; fi
+
+  if [[ ! -d "${DATA}/MOM6_OUTPUT" ]]; then mkdir -p "${DATA}/MOM6_OUTPUT"; fi
  if [[ !
-d "${DATA}/MOM6_RESTART" ]]; then mkdir -p "${DATA}/MOM6_RESTART"; fi + + # MOM6 does not have a concept of high frequency output like FV3 + # Convert output settings into an explicit list for MOM6 + # Ignore "not used" warning + # shellcheck disable=SC2034 + MOM6_OUTPUT_FH=$(seq -s ' ' "${FHMIN}" "${FHOUT}" "${FHMAX}") + +} + +CMEPS_predet(){ + echo "SUB ${FUNCNAME[0]}: CMEPS before run type determination" + + if [[ ! -d "${COM_MED_RESTART}" ]]; then mkdir -p "${COM_MED_RESTART}"; fi + + if [[ ! -d "${DATA}/CMEPS_RESTART" ]]; then mkdir -p "${DATA}/CMEPS_RESTART"; fi + +} + +GOCART_predet(){ + echo "SUB ${FUNCNAME[0]}: GOCART before run type determination" + + if [[ ! -d "${COM_CHEM_HISTORY}" ]]; then mkdir -p "${COM_CHEM_HISTORY}"; fi + } diff --git a/ush/hpssarch_gen.sh b/ush/hpssarch_gen.sh index f1beb9469d..903c2d63fb 100755 --- a/ush/hpssarch_gen.sh +++ b/ush/hpssarch_gen.sh @@ -251,48 +251,64 @@ if [[ ${type} = "gfs" ]]; then } >> "${DATA}/gfswave.txt" fi - if [[ ${DO_OCN} = "YES" ]]; then + if [[ "${DO_OCN}" == "YES" ]]; then - head="gfs.t${cyc}z." + head="gfs.ocean.t${cyc}z." + rm -f "${DATA}/ocean_6hravg.txt"; touch "${DATA}/ocean_6hravg.txt" + rm -f "${DATA}/ocean_daily.txt"; touch "${DATA}/ocean_daily.txt" + rm -f "${DATA}/ocean_grib2.txt"; touch "${DATA}/ocean_grib2.txt" - rm -f "${DATA}/gfs_flux_1p00.txt" - rm -f "${DATA}/ocn_ice_grib2_0p5.txt" - rm -f "${DATA}/ocn_ice_grib2_0p25.txt" - rm -f "${DATA}/ocn_2D.txt" - rm -f "${DATA}/ocn_3D.txt" - rm -f "${DATA}/ocn_xsect.txt" - rm -f "${DATA}/ocn_daily.txt" - touch "${DATA}/gfs_flux_1p00.txt" - touch "${DATA}/ocn_ice_grib2_0p5.txt" - touch "${DATA}/ocn_ice_grib2_0p25.txt" - touch "${DATA}/ocn_2D.txt" - touch "${DATA}/ocn_3D.txt" - touch "${DATA}/ocn_xsect.txt" - touch "${DATA}/ocn_daily.txt" - echo "${COM_OCEAN_INPUT/${ROTDIR}\//}/MOM_input" >> "${DATA}/ocn_2D.txt" - echo "${COM_OCEAN_2D/${ROTDIR}\//}/ocn_2D*" >> "${DATA}/ocn_2D.txt" - echo "${COM_OCEAN_3D/${ROTDIR}\//}/ocn_3D*" >> "${DATA}/ocn_3D.txt" - echo "${COM_OCEAN_XSECT/${ROTDIR}\//}/ocn*EQ*" >> "${DATA}/ocn_xsect.txt" - echo "${COM_OCEAN_HISTORY/${ROTDIR}\//}/ocn_daily*" >> "${DATA}/ocn_daily.txt" - echo "${COM_OCEAN_GRIB_0p50/${ROTDIR}\//}/ocn_ice*0p5x0p5.grb2" >> "${DATA}/ocn_ice_grib2_0p5.txt" - echo "${COM_OCEAN_GRIB_0p25/${ROTDIR}\//}/ocn_ice*0p25x0p25.grb2" >> "${DATA}/ocn_ice_grib2_0p25.txt" + echo "${COM_OCEAN_HISTORY/${ROTDIR}\//}/${head}6hr_avg.f*.nc" >> "${DATA}/ocean_6hravg.txt" + echo "${COM_OCEAN_HISTORY/${ROTDIR}\//}/${head}daily.f*.nc" >> "${DATA}/ocean_daily.txt" + + { + if [[ -d "${COM_OCEAN_GRIB}/5p00" ]]; then + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/5p00/${head}5p00.f*.grib2" + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/5p00/${head}5p00.f*.grib2.idx" + fi + if [[ -d "${COM_OCEAN_GRIB}/1p00" ]]; then + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/1p00/${head}1p00.f*.grib2" + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/1p00/${head}1p00.f*.grib2.idx" + fi + if [[ -d "${COM_OCEAN_GRIB}/0p25" ]]; then + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/0p25/${head}0p25.f*.grib2" + echo "${COM_OCEAN_GRIB/${ROTDIR}\//}/0p25/${head}0p25.f*.grib2.idx" + fi + } >> "${DATA}/ocean_grib2.txt" # Also save fluxes from atmosphere + head="gfs.t${cyc}z." + rm -f "${DATA}/gfs_flux_1p00.txt"; touch "${DATA}/gfs_flux_1p00.txt" { echo "${COM_ATMOS_GRIB_1p00/${ROTDIR}\//}/${head}flux.1p00.f???" echo "${COM_ATMOS_GRIB_1p00/${ROTDIR}\//}/${head}flux.1p00.f???.idx" } >> "${DATA}/gfs_flux_1p00.txt" fi - if [[ ${DO_ICE} = "YES" ]]; then - head="gfs.t${cyc}z." 
+ if [[ "${DO_ICE}" == "YES" ]]; then + head="gfs.ice.t${cyc}z." + rm -f "${DATA}/ice_6hravg.txt"; touch "${DATA}/ice_6hravg.txt" + rm -f "${DATA}/ice_grib2.txt"; touch "${DATA}/ice_grib2.txt" - rm -f "${DATA}/ice.txt" - touch "${DATA}/ice.txt" { - echo "${COM_ICE_INPUT/${ROTDIR}\//}/ice_in" - echo "${COM_ICE_HISTORY/${ROTDIR}\//}/ice*nc" - } >> "${DATA}/ice.txt" + echo "${COM_ICE_HISTORY/${ROTDIR}\//}/${head}ic.nc" + echo "${COM_ICE_HISTORY/${ROTDIR}\//}/${head}6hr_avg.f*.nc" + } >> "${DATA}/ice_6hravg.txt" + + { + if [[ -d "${COM_ICE_GRIB}/5p00" ]]; then + echo "${COM_ICE_GRIB/${ROTDIR}\//}/5p00/${head}5p00.f*.grib2" + echo "${COM_ICE_GRIB/${ROTDIR}\//}/5p00/${head}5p00.f*.grib2.idx" + fi + if [[ -d "${COM_ICE_GRIB}/1p00" ]]; then + echo "${COM_ICE_GRIB/${ROTDIR}\//}/1p00/${head}1p00.f*.grib2" + echo "${COM_ICE_GRIB/${ROTDIR}\//}/1p00/${head}1p00.f*.grib2.idx" + fi + if [[ -d "${COM_ICE_GRIB}/0p25" ]]; then + echo "${COM_ICE_GRIB/${ROTDIR}\//}/0p25/${head}0p25.f*.grib2" + echo "${COM_ICE_GRIB/${ROTDIR}\//}/0p25/${head}0p25.f*.grib2.idx" + fi + } >> "${DATA}/ice_grib2.txt" fi if [[ ${DO_AERO} = "YES" ]]; then @@ -766,4 +782,3 @@ fi ##end of enkfgdas or enkfgfs #----------------------------------------------------- exit 0 - diff --git a/ush/icepost.ncl b/ush/icepost.ncl deleted file mode 100755 index ad102971c4..0000000000 --- a/ush/icepost.ncl +++ /dev/null @@ -1,382 +0,0 @@ -;------------------------------------------------------------------ -; Denise.Worthen@noaa.gov (Feb 2019) -; -; This script will remap CICE5 output on the tripole grid to -; a set of rectilinear grids using pre-computed ESMF weights to remap -; the listed fields to the destination grid and write the results -; to a new netCDF file -; -; See ocnpost.ncl for a complete description -; -; Bin.Li@noaa.gov (May 2019) -; This script is revised to be used in the coupled workflow. 
-; Revised parts are marked by - - load "$NCARG_ROOT/lib/ncarg/nclscripts/esmf/ESMF_regridding.ncl" - -;---------------------------------------------------------------------- -begin - -;************************************************ -; specify parameters -;************************************************ -; - - output_masks = False - ; destination grid sizes and name - dsttype = (/"rect."/) - ;dstgrds = (/"1p0", "0p5", "0p25"/) -; - - ; specify a location to use - ; nemsrc = "/scratch4/NCEPDEV/ocean/save/Denise.Worthen/NEMS_INPUT0.1/ocnicepost/" - ; interpolation methods - methods = (/"bilinear" ,"conserve"/) - ; ocean model output location - ;dirsrc = "/scratch3/NCEPDEV/stmp2/Denise.Worthen/BM1_ice/" - - - ; variables to be regridded with the native tripole stagger location - - varlist = (/ (/ "hi_h", "Ct", "bilinear"/) \ - ,(/ "hs_h", "Ct", "bilinear"/) \ - ,(/ "Tsfc_h", "Ct", "bilinear"/) \ - ,(/ "aice_h", "Ct", "bilinear"/) \ - ,(/ "sst_h", "Ct", "bilinear"/) \ - /) - dims = dimsizes(varlist) - nvars = dims(0) - delete(dims) - ;print(varlist) - - ; vectors to be regridded with the native tripole stagger location - ; and dimensionality - ; note: vectors are always unstaggered using bilinear weights, but can - ; be remapped using conservative - nvpairs = 1 - veclist = new( (/nvpairs,3,2/),"string") - veclist = (/ (/ (/"uvel_h", "vvel_h"/), (/"Bu", "Bu"/), (/"bilinear", "bilinear"/) /) \ - /) - ;print(veclist) - - begTime = get_cpu_time() -;---------------------------------------------------------------------- -; make a list of the directories and files from the run -;---------------------------------------------------------------------- -; idate = "20120101" -; icefilelist = systemfunc("ls "+dirsrc+"gfs."+idate+"/00/"+"ice*.nc") -; icef = addfiles(icefilelist,"r") -; nfiles = dimsizes(icefilelist) -; - - ; get the rotation angle - angleT = icef[0]->ANGLET - - ; get a 2 dimensional fields for creating the interpolation mask - ; the mask2d contain 1's on land and 0's at valid points. - mask2d = where(ismissing(icef[0]->sst_h), 1.0, 0.0) - ;printVarSummary(mask2d) - - ; create conformed rotation arrays to make vector rotations cleaner - angleT2d=conform_dims(dimsizes(mask2d),angleT,(/1,2/)) - -;---------------------------------------------------------------------- -; loop over the output resolutions -;---------------------------------------------------------------------- - - jj = 1 - ii = 0 - - do jj = 0,dimsizes(dstgrds)-1 - ;outres = "_"+dstgrds(jj)+"x"+dstgrds(jj) - outres = dstgrds(jj)+"x"+dstgrds(jj) - outgrid = dstgrds(jj) - - ; regrid a field to obtain the output xy dimensions - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+".bilinear.nc" - tt = ESMF_regrid_with_weights(angleT,wgtsfile,False) - tt!0 = "lat" - tt!1 = "lon" - lat = tt&lat - lon = tt&lon - dims = dimsizes(tt) - nlat = dims(0) - nlon = dims(1) - print("fields will be remapped to destination grid size "\ - +nlon+" "+nlat) - - delete(tt) - delete(dims) - - ; regrid the masks to obtain the interpolation masks. - ; the mask2d contain 1's on land and 0's at valid points. - ; when remapped, any mask value > 0 identifies land values that - ; have crept into the field. 
remapped model fields are then - ; masked with this interpolation mask - - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+".bilinear.nc" - rgmask2d = ESMF_regrid_with_weights(mask2d, wgtsfile,False) - - if(output_masks)then - testfile = "masks_"+dstgrds(jj)+".nc" - system("/bin/rm -f "+testfile) - ; create - testcdf = addfile(testfile,"c") - testcdf->rgmask2d = rgmask2d - ; close - delete(testcdf) - end if - - ; create the interpolation mask - rgmask2d = where(rgmask2d .gt. 0.0, rgmask2d@_FillValue, 1.0) - -;---------------------------------------------------------------------- -; loop over each file in the icefilelist -;---------------------------------------------------------------------- -; - ; retrieve the time stamp - time = icef[0]->time - delete(time@bounds) - -;---------------------------------------------------------------------- -; set up the output netcdf file -;---------------------------------------------------------------------- -; system("/bin/rm -f " + outfile) ; remove if exists -; outcdf = addfile (outfile, "c") ; open output file -; -; - - ; explicitly declare file definition mode. Improve efficiency. - setfileoption(outcdf,"DefineMode",True) - - ; create global attributes of the file - fAtt = True ; assign file attributes - fAtt@creation_date = systemfunc ("date") - fAtt@source_file = infile - fileattdef( outcdf, fAtt ) ; copy file attributes - - ; predefine the coordinate variables and their dimensionality - dimNames = (/"time", "lat", "lon"/) - dimSizes = (/ -1 , nlat, nlon/) - dimUnlim = (/ True , False, False/) - filedimdef(outcdf,dimNames,dimSizes,dimUnlim) - - ; predefine the the dimensionality of the variables to be written out - filevardef(outcdf, "time", typeof(time), getvardims(time)) - filevardef(outcdf, "lat", typeof(lat), getvardims(lat)) - filevardef(outcdf, "lon", typeof(lon), getvardims(lon)) - - ; Copy attributes associated with each variable to the file - filevarattdef(outcdf, "time", time) - filevarattdef(outcdf, "lat", lat) - filevarattdef(outcdf, "lon", lon) - - ; predefine variables - do nv = 0,nvars-1 - varname = varlist(nv,0) - odims = (/"time", "lat", "lon"/) - ;print("creating variable "+varname+" in file") - filevardef(outcdf, varname, "float", odims) - delete(odims) - end do - - do nv = 0,nvpairs-1 - do nn = 0,1 - vecname = veclist(nv,0,nn) - odims = (/"time", "lat", "lon"/) - ;print("creating variable "+vecname+" in file") - filevardef(outcdf, vecname, "float", odims) - delete(odims) - end do - end do - - ; explicitly exit file definition mode. - setfileoption(outcdf,"DefineMode",False) - - lat=lat(::-1) - ; write the dimensions to the file - outcdf->time = (/time/) - outcdf->lat = (/lat/) - outcdf->lon = (/lon/) - -;---------------------------------------------------------------------- -; loop over nvars variables -;---------------------------------------------------------------------- - - ;nv = 1 - do nv = 0,nvars-1 - varname = varlist(nv,0) - vargrid = varlist(nv,1) - varmeth = varlist(nv,2) - - ;print(nv+" "+varname+" "+vargrid+" "+varmeth) - icevar = icef[ii]->$varname$ - ndims = dimsizes(dimsizes(icevar)) - ;print(ndims+" "+dimsizes(icevar)) - - if(vargrid .ne. 
"Ct")then - ; print error if the variable is not on the Ct grid - print("Variable is not on Ct grid") - exit - end if - - ; regrid to dsttype+dstgrd with method - ;print("remapping "+varname+" to grid "+dsttype+dstgrds(jj)) - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+"."+varmeth+".nc" - - rgtt = ESMF_regrid_with_weights(icevar,wgtsfile,False) - rgtt = where(ismissing(rgmask2d),icevar@_FillValue,rgtt) - rgtt=rgtt(:,::-1,:) - - ; enter file definition mode to add variable attributes - setfileoption(outcdf,"DefineMode",True) - filevarattdef(outcdf, varname, rgtt) - setfileoption(outcdf,"DefineMode",False) - - - outcdf->$varname$ = (/rgtt/) - - delete(icevar) - delete(rgtt) - - ; nv, loop over number of variables - end do - -;---------------------------------------------------------------------- -; -;---------------------------------------------------------------------- - - ;nv = 0 - do nv = 0,nvpairs-1 - vecnames = veclist(nv,0,:) - vecgrids = veclist(nv,1,:) - vecmeth = veclist(nv,2,:) - ;print(nv+" "+vecnames+" "+vecgrids+" "+vecmeth) - - ; create a vector pair list - vecpairs = NewList("fifo") - n = 0 - uvel = icef[ii]->$vecnames(n)$ - vecfld = where(ismissing(uvel),0.0,uvel) - copy_VarAtts(uvel,vecfld) - ;print("unstagger "+vecnames(n)+" from "+vecgrids(n)+" to Ct") - wgtsfile = nemsrc+"/"+"tripole.mx025."+vecgrids(n)+".to.Ct.bilinear.nc" - ut = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - delete(ut@remap) - - n = 1 - vvel = icef[ii]->$vecnames(n)$ - vecfld = where(ismissing(vvel),0.0,vvel) - copy_VarAtts(vvel,vecfld) - ;print("unstagger "+vecnames(n)+" from "+vecgrids(n)+" to Ct") - wgtsfile = nemsrc+"/"+"tripole.mx025."+vecgrids(n)+".to.Ct.bilinear.nc" - vt = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - delete(vt@remap) - - ListAppend(vecpairs,ut) - ListAppend(vecpairs,vt) - ;print(vecpairs) - - ; rotate - ; first copy Metadata - urot = vecpairs[0] - vrot = vecpairs[1] - urot = cos(angleT2d)*ut - sin(angleT2d)*vt - vrot = sin(angleT2d)*ut + cos(angleT2d)*vt - - ; change attribute to indicate these are now rotated velocities - urot@long_name=str_sub_str(urot@long_name,"(x)","zonal") - vrot@long_name=str_sub_str(vrot@long_name,"(y)","meridional") - ; copy back - vecpairs[0] = urot - vecpairs[1] = vrot - delete([/urot, vrot/]) - - ; remap - do n = 0,1 - vecfld = vecpairs[n] - ; regrid to dsttype+dstgrd with method - ;print("remapping "+vecnames(n)+" to grid "+dsttype+dstgrds(jj)) - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+"."+vecmeth(n)+".nc" - - rgtt = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - rgtt = where(ismissing(rgmask2d),vecfld@_FillValue,rgtt) - rgtt=rgtt(:,::-1,:) - - ; enter file definition mode to add variable attributes - setfileoption(outcdf,"DefineMode",True) - filevarattdef(outcdf, vecnames(n), rgtt) - setfileoption(outcdf,"DefineMode",False) - - outcdf->$vecnames(n)$ = (/rgtt/) - delete(rgtt) - end do - delete([/uvel,vvel,ut,vt,vecfld,vecpairs/]) - delete([/vecnames,vecgrids,vecmeth/]) - ; nv, loop over number of vector pairs - end do - -;---------------------------------------------------------------------- -; close the outcdf and continue through filelist -;---------------------------------------------------------------------- - - delete(outcdf) - - ; ii, loop over files - ;end do - ;jj, loop over destination grids - delete([/lat,lon,nlon,nlat/]) - delete([/rgmask2d/]) - end do - print("One complete ice file in " + (get_cpu_time() - begTime) + " seconds") -exit -end diff --git a/ush/oceanice_nc2grib2.sh 
b/ush/oceanice_nc2grib2.sh new file mode 100755 index 0000000000..1d0e5ae274 --- /dev/null +++ b/ush/oceanice_nc2grib2.sh @@ -0,0 +1,319 @@ +#!/bin/bash + +# This script contains functions to convert ocean/ice rectilinear netCDF files to grib2 format +# This script uses the wgrib2 utility to convert the netCDF files to grib2 format and then indexes it + +source "${HOMEgfs}/ush/preamble.sh" + +################################################################################ +function _ice_nc2grib2 { +# This function converts the ice rectilinear netCDF files to grib2 format + + # Set the inputs + local grid=${1} # 0p25, 0p50, 1p00, 5p00 + local latlon_dims=${2} # 0:721:0:1440, 0:361:0:720, 0:181:0:360, 0:36:0:72 + local current_cycle=${3} # YYYYMMDDHH + local aperiod=${4} # 0-6 + local infile=${5} # ice.0p25.nc + local outfile=${6} # ice.0p25.grib2 + local template=${7} # template.global.0p25.gb2 + + ${WGRIB2} "${template}" \ + -import_netcdf "${infile}" "hi_h" "0:1:${latlon_dims}" \ + -set_var ICETK -set center 7 \ + -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ + -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ + -import_netcdf "${infile}" "aice_h" "0:1:${latlon_dims}" \ + -set_var ICEC -set center 7 \ + -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ + -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ + -import_netcdf "${infile}" "Tsfc_h" "0:1:${latlon_dims}" \ + -set_var ICETMP -set center 7 -rpn "273.15:+" \ + -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ + -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ + -import_netcdf "${infile}" "uvel_h" "0:1:${latlon_dims}" \ + -set_var UICE -set center 7 \ + -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ + -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ + -import_netcdf "${infile}" "vvel_h" "0:1:${latlon_dims}" \ + -set_var VICE -set center 7 \ + -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ + -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" + +# Additional variables needed for GFSv17/GEFSv13 operational forecast +# files, but GRIB2 parameters not available in NCEP (-set center 7) +# tables in wgrib2 v2.0.8: + +# -import_netcdf "${infile}" "hs_h" "0:1:${latlon_dims}" \ +# -set_var ??? -set center 7 \ +# -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ +# -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ +# -import_netcdf "${infile}" "frzmlt_h" "0:1:${latlon_dims}" \ +# -set_var ??? -set center 7 \ +# -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ +# -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ +# -import_netcdf "${infile}" "albsni_h" "0:1:${latlon_dims}" \ +# -set_var ALBICE -set center 7 -rpn "100.0:/" \ +# -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ +# -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ +# -import_netcdf "${infile}" "mlt_onset_h" "0:1:${latlon_dims}" \ +# -set_var ??? -set center 7 \ +# -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ +# -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \ +# -import_netcdf "${infile}" "frz_onset_h" "0:1:${latlon_dims}" \ +# -set_var ??? -set center 7 \ +# -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \ +# -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" + + rc=$? 
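+  # NB: the commented-out block above contains no commands, so "rc=$?" still
+  # captures the exit status of the single chained ${WGRIB2} call that wrote
+  # all of the ice fields to "${outfile}".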
+  # Check if the conversion was successful
+  if (( rc != 0 )); then
+    echo "FATAL ERROR: Failed to convert the ice rectilinear netCDF file to grib2 format"
+  fi
+  return "${rc}"
+
+}
+
+################################################################################
+function _ocean2D_nc2grib2 {
+# This function converts the ocean 2D rectilinear netCDF files to grib2 format
+
+  # Set the inputs
+  local grid=${1} # 0p25, 0p50, 1p00, 5p00
+  local latlon_dims=${2} # 0:721:0:1440, 0:361:0:720, 0:181:0:360, 0:36:0:72
+  local current_cycle=${3} # YYYYMMDDHH
+  local aperiod=${4} # 0-6
+  local infile=${5} # ocean.0p25.nc
+  local outfile=${6} # ocean_2D.0p25.grib2
+  local template=${7} # template.global.0p25.gb2
+
+  ${WGRIB2} "${template}" \
+    -import_netcdf "${infile}" "SSH" "0:1:${latlon_dims}" \
+      -set_var SSHG -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "SST" "0:1:${latlon_dims}" \
+      -set_var WTMP -set center 7 -rpn "273.15:+" \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "SSS" "0:1:${latlon_dims}" \
+      -set_var SALIN -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "speed" "0:1:${latlon_dims}" \
+      -set_var SPC -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "SSU" "0:1:${latlon_dims}" \
+      -set_var UOGRD -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "SSV" "0:1:${latlon_dims}" \
+      -set_var VOGRD -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "latent" "0:1:${latlon_dims}" \
+      -set_var LHTFL -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "sensible" "0:1:${latlon_dims}" \
+      -set_var SHTFL -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "SW" "0:1:${latlon_dims}" \
+      -set_var DSWRF -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "LW" "0:1:${latlon_dims}" \
+      -set_var DLWRF -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "LwLatSens" "0:1:${latlon_dims}" \
+      -set_var THFLX -set center 7 \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+    -import_netcdf "${infile}" "MLD_003" "0:1:${latlon_dims}" \
+      -set_var WDEPTH -set center 7 -set_lev "mixed layer depth" \
+      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}"
+
+# Additional variables needed for GFSv17/GEFSv13 operational forecast
+# files, but GRIB2 parameters not available in NCEP (-set center 7)
+# tables in wgrib2 v2.0.8:
+#
+#    -import_netcdf "${infile}" "Heat_PmE" "0:1:${latlon_dims}" \
+#      -set_var DWHFLUX -set center 7 \
+#      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+#      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+#    -import_netcdf "${infile}" "taux" "0:1:${latlon_dims}" \
+#      -set_var XCOMPSS -set center 7 \
+#      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+#      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}" \
+#    -import_netcdf "${infile}" "tauy" "0:1:${latlon_dims}" \
+#      -set_var YCOMPSS -set center 7 \
+#      -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+#      -set_scaling same same -set_grib_type c1 -grib_out "${outfile}"
+
+  rc=$?
+  # Check if the conversion was successful
+  if (( rc != 0 )); then
+    echo "FATAL ERROR: Failed to convert the ocean rectilinear netCDF file to grib2 format"
+  fi
+  return "${rc}"
+
+}
+
+################################################################################
+function _ocean3D_nc2grib2 {
+# This function converts the ocean 3D rectilinear netCDF files to grib2 format
+
+  # Set the inputs
+  local grid=${1} # 0p25, 0p50, 1p00, 5p00
+  local latlon_dims=${2} # 0:721:0:1440, 0:361:0:720, 0:181:0:360, 0:36:0:72
+  local levels=${3} # 5:15:25:35:45:55:65:75:85:95:105:115:125
+  local current_cycle=${4} # YYYYMMDDHH
+  local aperiod=${5} # 0-6
+  local infile=${6} # ocean.0p25.nc
+  local outfile=${7} # ocean_3D.0p25.grib2
+  local template=${8} # template.global.0p25.gb2
+
+  IFS=':' read -ra depths <<< "${levels}"
+
+  zl=0
+  for depth in "${depths[@]}"; do
+
+    [[ -f "tmp.gb2" ]] && rm -f "tmp.gb2"
+
+    ${WGRIB2} "${template}" \
+      -import_netcdf "${infile}" "temp" "0:1:${zl}:1:${latlon_dims}" \
+        -set_var WTMP -set center 7 -rpn "273.15:+" \
+        -set_lev "${depth} m below water surface" \
+        -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+        -set_scaling same same -set_grib_type c1 -grib_out tmp.gb2 \
+      -import_netcdf "${infile}" "so" "0:1:${zl}:1:${latlon_dims}" \
+        -set_var SALIN -set center 7 \
+        -set_lev "${depth} m below water surface" \
+        -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+        -set_scaling same same -set_grib_type c1 -grib_out tmp.gb2 \
+      -import_netcdf "${infile}" "uo" "0:1:${zl}:1:${latlon_dims}" \
+        -set_var UOGRD -set center 7 \
+        -set_lev "${depth} m below water surface" \
+        -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+        -set_scaling same same -set_grib_type c1 -grib_out tmp.gb2 \
+      -import_netcdf "${infile}" "vo" "0:1:${zl}:1:${latlon_dims}" \
+        -set_var VOGRD -set center 7 \
+        -set_lev "${depth} m below water surface" \
+        -set_date "${current_cycle}" -set_ftime "${aperiod} hour ave fcst" \
+        -set_scaling same same -set_grib_type c1 -grib_out tmp.gb2
+
+    rc=$?
+    # Check if the conversion was successful
+    if (( rc != 0 )); then
+      echo "FATAL ERROR: Failed to convert the ocean rectilinear netCDF file to grib2 format at depth ${depth}m, ABORT!"
+      return "${rc}"
+    fi
+
+    cat tmp.gb2 >> "${outfile}"
+    rm -f tmp.gb2
+    ((zl = zl + 1))
+
+  done
+
+  # Notes:
+  # WATPTEMP (water potential temperature (theta)) may be a better
+  # GRIB2 parameter than WTMP (water temperature) if MOM6 outputs
+  # potential temperature. WATPTEMP is not available in NCEP
+  # (-set center 7) tables in wgrib2 v2.0.8.
+ + return "${rc}" + +} + +################################################################################ +# Input arguments +component=${1:?"Need a valid component; options: ice|ocean"} +grid=${2:-"0p25"} # Default to 0.25-degree grid +current_cycle=${3:-"2013100100"} # Default to 2013100100 +avg_period=${4:-"0-6"} # Default to 6-hourly average +ocean_levels=${5:-"5:15:25:35:45:55:65:75:85:95:105:115:125"} # Default to 12-levels + +case "${grid}" in + "0p25") + latlon_dims="0:721:0:1440" + ;; + "0p50") + latlon_dims="0:361:0:720" + ;; + "1p00") + latlon_dims="0:181:0:360" + ;; + "5p00") + latlon_dims="0:36:0:72" + ;; + *) + echo "FATAL ERROR: Unsupported grid '${grid}', ABORT!" + exit 1 + ;; +esac + +input_file="${component}.${grid}.nc" +template="template.global.${grid}.gb2" + +# Check if the template file exists +if [[ ! -f "${template}" ]]; then + echo "FATAL ERROR: '${template}' does not exist, ABORT!" + exit 127 +fi + +# Check if the input file exists +if [[ ! -f "${input_file}" ]]; then + echo "FATAL ERROR: '${input_file}' does not exist, ABORT!" + exit 127 +fi + +case "${component}" in + "ice") + rm -f "${component}.${grid}.grib2" || true + _ice_nc2grib2 "${grid}" "${latlon_dims}" "${current_cycle}" "${avg_period}" "${input_file}" "${component}.${grid}.grib2" "${template}" + rc=$? + if (( rc != 0 )); then + echo "FATAL ERROR: Failed to convert the ice rectilinear netCDF file to grib2 format" + exit "${rc}" + fi + ;; + "ocean") + rm -f "${component}_2D.${grid}.grib2" || true + _ocean2D_nc2grib2 "${grid}" "${latlon_dims}" "${current_cycle}" "${avg_period}" "${input_file}" "${component}_2D.${grid}.grib2" "${template}" + rc=$? + if (( rc != 0 )); then + echo "FATAL ERROR: Failed to convert the ocean 2D rectilinear netCDF file to grib2 format" + exit "${rc}" + fi + rm -f "${component}_3D.${grid}.grib2" || true + _ocean3D_nc2grib2 "${grid}" "${latlon_dims}" "${ocean_levels}" "${current_cycle}" "${avg_period}" "${input_file}" "${component}_3D.${grid}.grib2" "${template}" + rc=$? + if (( rc != 0 )); then + echo "FATAL ERROR: Failed to convert the ocean 3D rectilinear netCDF file to grib2 format" + exit "${rc}" + fi + # Combine the 2D and 3D grib2 files into a single file + rm -f "${component}.${grid}.grib2" || true + cat "${component}_2D.${grid}.grib2" "${component}_3D.${grid}.grib2" > "${component}.${grid}.grib2" + + ;; + *) + echo "FATAL ERROR: Unknown component: '${component}'. ABORT!" + exit 3 + ;; +esac + +# Index the output grib2 file +${WGRIB2} -s "${component}.${grid}.grib2" > "${component}.${grid}.grib2.idx" +rc=$? +# Check if the indexing was successful +if (( rc != 0 )); then + echo "FATAL ERROR: Failed to index the file '${component}.${grid}.grib2'" + exit "${rc}" +fi + +exit 0 diff --git a/ush/ocnpost.ncl b/ush/ocnpost.ncl deleted file mode 100755 index 27e60b0edf..0000000000 --- a/ush/ocnpost.ncl +++ /dev/null @@ -1,588 +0,0 @@ -;------------------------------------------------------------------ -; Denise.Worthen@noaa.gov (Feb 2019) -; -; This script will remap MOM6 ocean output on the tripole grid to -; a set of rectilinear grids using pre-computed ESMF weights to remap -; the listed fields to the destination grid and write the results -; to a new netCDF file -; -; Prior to running this script, files containing the conservative -; and bilinear regridding weights must be generated. These weights -; are created using the generate_iceocnpost_weights.ncl script. 
-; -; Note: the descriptive text below assumes fortran type indexing -; where the variables are indexed as (i,j) and indices start at 1 -; NCL indices are (j,i) and start at 0 -; -; The post involves these steps -; -; a) unstaggering velocity points -; MOM6 is on an Arakawa C grid. MOM6 refers to these -; locations as "Ct" for the centers and "Cu", "Cv" -; "Bu" for the left-right, north-south and corner -; points, respectively. -; -; The indexing scheme in MOM6 is as follows: -; -; Cv@i,j -; ----X------X Bu@i,j -; | -; | -; Ct@i,j | -; X X Cu@i,j -; | -; | -; | -; -; CICE5 is on an Arakawa B grid. CICE5 refers to these -; locations as TLAT,TLON for the centers and ULAT,ULON -; for the corners -; -; In UFS, the CICE5 grid has been created using the MOM6 -; supergrid file. Therefore, all grid points are consistent -; between the two models. -; -; In the following, MOM6's nomenclature will be followed, -; so that CICE5's U-grid will be referred to as "Bu". -; -; b) rotation of tripole vectors to East-West -; MOM6 and CICE6 both output velocties on their native -; velocity points. For MOM6, that is u-velocities on the -; Cu grid and v-velocites on the Cv grid. For CICE5, it is -; both u and v-velocities on the Bu grid. -; -; The rotation angle for both models are defined at center -; grid points; therefore the velocities need to be first -; unstaggered before rotation. MOM6 and CICE5 also define -; opposite directions for the rotations. Finally, while the -; grid points are identical between the two models, CICE5 -; calculates the rotation angle at center grid points by -; averaging the four surrounding B grid points. MOM6 derives -; the rotation angle at the center directly from the latitude -; and longitude of the center grid points. The angles are therefor -; not identical between the two grids. -; -; c) conservative regridding of some fields -; Fields such as ice concentration or fluxes which inherently -; area area-weighted require conservative regridding. Most other -; variables are state variables and can be regridded using -; bilinear weighting. -; -; An efficient way to accomplish the unstaggering of velocities -; is to use the bilinear interpolation weights between grid -; points of the Arakawa C grid and the center grid points (for example -; Cu->Ct). These weights are generated by the weight generation script -; -; Remapping from the tripole to rectilinear uses either the bilinear -; or conservative weights from the weight generation script. Bilinear weights -; generated for the first vertical level can be used on other levels -; (where the masking changes) by utilizing the correct masking procedure. -; Set output_masks to true to examine the interpolation masks. -; -; Intermediate file output can easily be generated for debugging by -; follwing the example in the output_masks logical -; -; Bin.Li@noaa.gov (May 2019) -; The scripts is revised for use in the coupled workflow. 
-; - load "$NCARG_ROOT/lib/ncarg/nclscripts/esmf/ESMF_regridding.ncl" - -;---------------------------------------------------------------------- -begin -; - - ; warnings (generated by int2p_n_Wrap) can be supressed by - ; the following (comment out to get the warnings) - err = NhlGetErrorObjectId() - setvalues err -; "errLevel" : "Fatal" ; only report Fatal errors - "errLevel" : "Verbose" - end setvalues - - output_masks = False - - ; specify a location to use - ; nemsrc = "/scratch4/NCEPDEV/ocean/save/Denise.Worthen/NEMS_INPUT0.1/ocnicepost/" - ; interpolation methods - methods = (/"bilinear" ,"conserve"/) - ; ocean model output location - ;dirsrc = "/scratch3/NCEPDEV/stmp2/Denise.Worthen/BM1_ocn/" - - ; destination grid sizes and name - dsttype = (/"rect."/) - ;dstgrds = (/"1p0", "0p5", "0p25"/) - ;dstgrds = (/"0p5"/) - dstgrds = (/"0p25"/) - - ; variables to be regridded with the native tripole stagger location - ; and dimensionality - ; first BM contained only field "mld", which was actually ePBL - ; the remaining BMs contain ePBL, MLD_003 and MLD_0125 - ; the following NCO command will be issued at the end - ; to rename the variable mld to ePBL if the variable mld is found - ; ncocmd = "ncrename -O -v mld,ePBL " - ncocmd = "ncrename -O -v MLD_003,mld" - - varlist = (/ (/ "SSH", "Ct", "bilinear", "2"/) \ - ,(/ "SST", "Ct", "bilinear", "2"/) \ - ,(/ "SSS", "Ct", "bilinear", "2"/) \ - ,(/ "speed", "Ct", "bilinear", "2"/) \ - ,(/ "temp", "Ct", "bilinear", "3"/) \ - ,(/ "so", "Ct", "bilinear", "3"/) \ - ,(/ "latent", "Ct", "conserve", "2"/) \ - ,(/ "sensible", "Ct", "conserve", "2"/) \ - ,(/ "SW", "Ct", "conserve", "2"/) \ - ,(/ "LW", "Ct", "conserve", "2"/) \ - ,(/ "evap", "Ct", "conserve", "2"/) \ - ,(/ "lprec", "Ct", "conserve", "2"/) \ - ,(/ "fprec", "Ct", "conserve", "2"/) \ - ,(/"LwLatSens", "Ct", "conserve", "2"/) \ - ,(/ "Heat_PmE", "Ct", "conserve", "2"/) \ -; ,(/ "mld", "Ct", "bilinear", "2"/) \ - ,(/ "ePBL", "Ct", "bilinear", "2"/) \ - ,(/ "MLD_003", "Ct", "bilinear", "2"/) \ - ,(/ "MLD_0125", "Ct", "bilinear", "2"/) \ - /) - dims = dimsizes(varlist) - nvars = dims(0) - delete(dims) - ;print(varlist) - - ; vectors to be regridded with the native tripole stagger location - ; and dimensionality - ; note: vectors are always unstaggered using bilinear weights, but can - ; be remapped using conservative - nvpairs = 3 - veclist = new( (/nvpairs,4,2/),"string") - veclist = (/ (/ (/ "SSU", "SSV"/), (/"Cu", "Cv"/), (/"bilinear", "bilinear"/), (/"2", "2"/) /) \ - , (/ (/ "uo", "vo"/), (/"Cu", "Cv"/), (/"bilinear", "bilinear"/), (/"3", "3"/) /) \ - , (/ (/ "taux", "tauy"/), (/"Cu", "Cv"/), (/"conserve", "conserve"/), (/"2", "2"/) /) \ - /) - ;print(veclist) - - begTime = get_cpu_time() -;---------------------------------------------------------------------- -; make a list of the directories and files from the run -;---------------------------------------------------------------------- - -; idate = "20120101" - -; ocnfilelist = systemfunc("ls "+dirsrc+"gfs."+idate+"/00/"+"ocn*.nc") -; ocnf = addfiles(ocnfilelist,"r") -; nfiles = dimsizes(ocnfilelist) -; - - ; get the rotation angles and vertical grid from the first file - ; two different name were used for the angles, either sinrot,cosrot - ; or sin_rot,cos_rot - if(isfilevar(ocnf[0],"sin_rot"))then - sinrot = ocnf[0]->sin_rot - else - sinrot = ocnf[0]->sinrot - end if - if(isfilevar(ocnf[0],"cos_rot"))then - cosrot = ocnf[0]->cos_rot - else - cosrot = ocnf[0]->cosrot - end if - z_l = ocnf[0]->z_l - z_i = ocnf[0]->z_i - nlevs = 
dimsizes(z_l) - - ; get a 2 and 3 dimensional fields for creating the interpolation masks - ; the mask2d,mask3d contain 1's on land and 0's at valid points. - mask2d = where(ismissing(ocnf[0]->SST), 1.0, 0.0) - mask3d = where(ismissing(ocnf[0]->temp), 1.0, 0.0) - ;printVarSummary(mask2d) - ;printVarSummary(mask3d) - - ; create conformed rotation arrays to make vector rotations cleaner - sinrot2d=conform_dims(dimsizes(mask2d),sinrot,(/1,2/)) - cosrot2d=conform_dims(dimsizes(mask2d),cosrot,(/1,2/)) - - sinrot3d=conform_dims(dimsizes(mask3d),sinrot,(/2,3/)) - cosrot3d=conform_dims(dimsizes(mask3d),cosrot,(/2,3/)) - - ; check for variables in file. this is only required because - ; of the missing/misnamed MLD variables in the first BM - ; only the varlist is checked, since it is assumed there are - ; no other variables missing after the first benchmark - valid = new((/nvars/),"logical") - valid = False - do nv = 0,nvars-1 - varname = varlist(nv,0) - if(isfilevar(ocnf[0],varname))then - valid(nv) = True - end if - print(varlist(nv,0)+" "+valid(nv)) - end do - -;---------------------------------------------------------------------- -; loop over the output resolutions -;---------------------------------------------------------------------- - - jj = 1 - ii = 0 - - do jj = 0,dimsizes(dstgrds)-1 - ;outres = "_"+dstgrds(jj)+"x"+dstgrds(jj) - outres = dstgrds(jj)+"x"+dstgrds(jj) - outgrid = dstgrds(jj) - - ; regrid a field to obtain the output xy dimensions - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+".bilinear.nc" - tt = ESMF_regrid_with_weights(sinrot,wgtsfile,False) - tt!0 = "lat" - tt!1 = "lon" - lat = tt&lat - lon = tt&lon - dims = dimsizes(tt) - nlat = dims(0) - nlon = dims(1) - - print("fields will be remapped to destination grid size "\ - +nlon+" "+nlat) - - delete(tt) - delete(dims) - - ; regrid the masks to obtain the interpolation masks. - ; the mask2d,mask3d contain 1's on land and 0's at valid points. - ; when remapped, any mask value > 0 identifies land values that - ; have crept into the field. remapped model fields are then - ; masked with this interpolation mask - - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+".bilinear.nc" - rgmask2d = ESMF_regrid_with_weights(mask2d, wgtsfile,False) - rgmask3d = ESMF_regrid_with_weights(mask3d, wgtsfile,False) - - if(output_masks)then - testfile = "masks_"+dstgrds(jj)+".nc" - system("/bin/rm -f "+testfile) - ; create - testcdf = addfile(testfile,"c") - testcdf->rgmask2d = rgmask2d - testcdf->rgmask3d = rgmask3d - ; close - delete(testcdf) - end if - - ; create the interpolation mask - rgmask2d = where(rgmask2d .gt. 0.0, rgmask2d@_FillValue, 1.0) - rgmask3d = where(rgmask3d .gt. 
0.0, rgmask3d@_FillValue, 1.0) - - ; conformed depth array - depth = conform_dims(dimsizes(mask3d), z_l, (/1/)) - ;print(dimsizes(depth)) - -;---------------------------------------------------------------------- -; loop over each file in the ocnfilelist -;---------------------------------------------------------------------- -; - - ; retrieve the time stamp - time = ocnf[0]->time - delete(time@bounds) - -;---------------------------------------------------------------------- -; set up the output netcdf file -;---------------------------------------------------------------------- -; system("/bin/rm -f " + outfile) ; remove if exists -; outcdf = addfile (outfile, "c") ; open output file -; specify output file information and open file for output - FILENAME_REGRID = DATA_TMP+"/ocnr"+VDATE+"."+ENSMEM+"."+IDATE+"_"+outres+"_MOM6.nc" - if (isfilepresent(FILENAME_REGRID)) then - system("rm -f "+FILENAME_REGRID) - end if - outcdf = addfile(FILENAME_REGRID,"c") - outfile=FILENAME_REGRID - - ; explicitly declare file definition mode. Improve efficiency. - setfileoption(outcdf,"DefineMode",True) - - ; create global attributes of the file - fAtt = True ; assign file attributes - fAtt@creation_date = systemfunc ("date") - fAtt@source_file = infile - fileattdef( outcdf, fAtt ) ; copy file attributes - - ; predefine the coordinate variables and their dimensionality - ; dimNames = (/"time", "z_l", "z_i", "z_T", "lat", "lon"/) - dimNames = (/"time", "z_l", "z_i", "lat", "lon"/) - ;dimSizes = (/ -1 , nlevs, nlevs+1, nTd, nlat, nlon/) - dimSizes = (/ -1 , nlevs, nlevs+1, nlat, nlon/) - ;dimUnlim = (/ True , False, False, False, False, False/) - dimUnlim = (/ True , False, False, False, False/) - filedimdef(outcdf,dimNames,dimSizes,dimUnlim) - - ; predefine the the dimensionality of the variables to be written out - filevardef(outcdf, "time", typeof(time), getvardims(time)) - filevardef(outcdf, "z_l", typeof(z_l), getvardims(z_l)) - filevardef(outcdf, "z_i", typeof(z_i), getvardims(z_i)) - ;filevardef(outcdf, "z_T", typeof(z_T), getvardims(z_T)) - filevardef(outcdf, "lat", typeof(lat), getvardims(lat)) - filevardef(outcdf, "lon", typeof(lon), getvardims(lon)) - - ; Copy attributes associated with each variable to the file - filevarattdef(outcdf, "time", time) - filevarattdef(outcdf, "z_l", z_l) - filevarattdef(outcdf, "z_i", z_i) - ;filevarattdef(outcdf, "z_T", z_T) - filevarattdef(outcdf, "lat", lat) - filevarattdef(outcdf, "lon", lon) - - ; predefine variables - do nv = 0,nvars-1 - varname = varlist(nv,0) - vardims = varlist(nv,3) - if(valid(nv))then - if(vardims .eq. "2")then - odims = (/"time", "lat", "lon"/) - else - odims = (/"time", "z_l", "lat", "lon"/) - end if - ;print("creating variable "+varname+" in file") - filevardef(outcdf, varname, "float", odims) - delete(odims) - end if - end do - - do nv = 0,nvpairs-1 - do nn = 0,1 - vecname = veclist(nv,0,nn) - vecdims = veclist(nv,3,nn) - if(vecdims .eq. "2")then - odims = (/"time", "lat", "lon"/) - else - odims = (/"time", "z_l", "lat", "lon"/) - end if - ;print("creating variable "+vecname+" in file") - filevardef(outcdf, vecname, "float", odims) - delete(odims) - delete(vecdims) - end do - end do - - ; explicitly exit file definition mode. 
- setfileoption(outcdf,"DefineMode",False) - - ; write the dimensions to the file - outcdf->time = (/time/) - outcdf->z_l = (/z_l/) - outcdf->z_i = (/z_i/) -; outcdf->z_T = (/z_T/) -; - outcdf->lat = (/lat/) - outcdf->lon = (/lon/) - -;---------------------------------------------------------------------- -; loop over nvars variables -;---------------------------------------------------------------------- - - do nv = 0,nvars-1 - varname = varlist(nv,0) - vargrid = varlist(nv,1) - varmeth = varlist(nv,2) - vardims = varlist(nv,3) - - if(valid(nv))then - ;print(nv+" "+varname+" "+vargrid+" "+varmeth) - ocnvar = ocnf[ii]->$varname$ - ndims = dimsizes(dimsizes(ocnvar)) - ;print(ndims+" "+dimsizes(ocnvar)) - - if(vargrid .ne. "Ct")then - ; print error if the variable is not on the Ct grid - print("Variable is not on Ct grid") - exit - end if - - ; regrid to dsttype+dstgrd with method - ;print("remapping "+varname+" to grid "+dsttype+dstgrds(jj)) - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+"."+varmeth+".nc" - - rgtt = ESMF_regrid_with_weights(ocnvar,wgtsfile,False) - if(vardims .eq. "2")then - rgtt = where(ismissing(rgmask2d),ocnvar@_FillValue,rgtt) - rgtt=rgtt(:,::-1,:) - else - rgtt = where(ismissing(rgmask3d),ocnvar@_FillValue,rgtt) - rgtt=rgtt(:,:,::-1,:) - end if - - ; enter file definition mode to add variable attributes - setfileoption(outcdf,"DefineMode",True) - filevarattdef(outcdf, varname, rgtt) - setfileoption(outcdf,"DefineMode",False) - - outcdf->$varname$ = (/rgtt/) - - delete(ocnvar) - delete(rgtt) - - ; variable exists - end if - ; nv, loop over number of variables - end do - -;---------------------------------------------------------------------- -; -;---------------------------------------------------------------------- - - ;nv = 2 - do nv = 0,nvpairs-1 - vecnames = veclist(nv,0,:) - vecgrids = veclist(nv,1,:) - vecmeth = veclist(nv,2,:) - vecdims = veclist(nv,3,:) - ;print(nv+" "+vecnames+" "+vecgrids+" "+vecmeth) - - ; create a vector pair list - vecpairs = NewList("fifo") - n = 0 - uvel = ocnf[ii]->$vecnames(n)$ - vecfld = where(ismissing(uvel),0.0,uvel) - copy_VarAtts(uvel,vecfld) - ;print("unstagger "+vecnames(n)+" from "+vecgrids(n)+" to Ct") - wgtsfile = nemsrc+"/"+"tripole.mx025."+vecgrids(n)+".to.Ct.bilinear.nc" - ut = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - delete(ut@remap) - - n = 1 - vvel = ocnf[ii]->$vecnames(n)$ - vecfld = where(ismissing(vvel),0.0,vvel) - copy_VarAtts(vvel,vecfld) - ;print("unstagger "+vecnames(n)+" from "+vecgrids(n)+" to Ct") - wgtsfile = nemsrc+"/"+"tripole.mx025."+vecgrids(n)+".to.Ct.bilinear.nc" - vt = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - delete(vt@remap) - - ListAppend(vecpairs,ut) - ListAppend(vecpairs,vt) - ;print(vecpairs) - - ; rotate - ; first copy Metadata - urot = vecpairs[0] - vrot = vecpairs[1] - if(vecdims(0) .eq. 
"2")then - urot = ut*cosrot2d + vt*sinrot2d - vrot = vt*cosrot2d - ut*sinrot2d - else - urot = ut*cosrot3d + vt*sinrot3d - vrot = vt*cosrot3d - ut*sinrot3d - end if - ; change attribute to indicate these are now rotated velocities - urot@long_name=str_sub_str(urot@long_name,"X","Zonal") - vrot@long_name=str_sub_str(vrot@long_name,"Y","Meridional") - ; copy back - vecpairs[0] = urot - vecpairs[1] = vrot - delete([/urot, vrot/]) - - ; remap - do n = 0,1 - vecfld = vecpairs[n] - ; regrid to dsttype+dstgrd with method - ;print("remapping "+vecnames(n)+" to grid "+dsttype+dstgrds(jj)) - wgtsfile = nemsrc+"/"+"tripole.mx025.Ct.to."+dsttype+dstgrds(jj)+"."+vecmeth(n)+".nc" - - rgtt = ESMF_regrid_with_weights(vecfld,wgtsfile,False) - if(vecdims(n) .eq. "2")then - rgtt = where(ismissing(rgmask2d),vecfld@_FillValue,rgtt) - rgtt=rgtt(:,::-1,:) - else - rgtt = where(ismissing(rgmask3d),vecfld@_FillValue,rgtt) - rgtt=rgtt(:,:,::-1,:) - end if - - ; enter file definition mode to add variable attributes - setfileoption(outcdf,"DefineMode",True) - filevarattdef(outcdf, vecnames(n), rgtt) - setfileoption(outcdf,"DefineMode",False) - - outcdf->$vecnames(n)$ = (/rgtt/) - delete(rgtt) - end do - delete([/uvel,vvel,ut,vt,vecfld,vecpairs/]) - delete([/vecnames,vecgrids,vecmeth,vecdims/]) - ; nv, loop over number of vector pairs - end do - -;---------------------------------------------------------------------- -; close the outcdf and continue through filelist -;---------------------------------------------------------------------- - - delete(outcdf) - ; rename mld to ePBL if required - do nv = 0,nvars-1 - varname = varlist(nv,0) - ; if(varname .eq. "mld" .and. valid(nv))then - if(varname .eq. "MLD_003" .and. valid(nv))then - print("Renaming MLD_003 to mld") - ;print(ncocmd+" "+outfile) - system(ncocmd+" "+outfile) - end if - end do - - ; ii, loop over files -; - ;jj, loop over destination grids - delete([/lat,lon,nlon,nlat/]) - delete([/rgmask2d,rgmask3d/]) - end do - print("One complete ocn file in " + (get_cpu_time() - begTime) + " seconds") -exit -end diff --git a/ush/parsing_ufs_configure.sh b/ush/parsing_ufs_configure.sh index 2071586905..bec5c8f0f6 100755 --- a/ush/parsing_ufs_configure.sh +++ b/ush/parsing_ufs_configure.sh @@ -1,20 +1,15 @@ #! /usr/bin/env bash ##### -## This script writes ufs.configure file -## first, select a "*.IN" templates based on -## $confignamevarforufs and parse values based on -## $cpl** switches. -## -## This is a child script of modular -## forecast script. This script is definition only (Is it? There is nothing defined here being used outside this script.) 
+## This script writes ufs.configure file based on a template defined in +## ${ufs_configure_template} ##### # Disable variable not used warnings # shellcheck disable=SC2034 writing_ufs_configure() { -echo "SUB ${FUNCNAME[0]}: ufs.configure.sh begins" +echo "SUB ${FUNCNAME[0]}: ufs.configure begins" # Setup ufs.configure local esmf_logkind=${esmf_logkind:-"ESMF_LOGKIND_MULTI"} #options: ESMF_LOGKIND_MULTI_ON_ERROR, ESMF_LOGKIND_MULTI, ESMF_LOGKIND_NONE @@ -24,14 +19,13 @@ local cap_dbug_flag=${cap_dbug_flag:-0} # Determine "cmeps_run_type" based on the availability of the mediator restart file # If it is a warm_start, we already copied the mediator restart to DATA, if it was present # If the mediator restart was not present, despite being a "warm_start", we put out a WARNING -# in forecast_postdet.sh +# in forecast_postdet.sh function CMEPS_postdet if [[ -f "${DATA}/ufs.cpld.cpl.r.nc" ]]; then local cmeps_run_type='continue' else local cmeps_run_type='startup' fi - # Atm-related local atm_model="fv3" local atm_petlist_bounds="0 $(( ATMPETS-1 ))" @@ -54,7 +48,7 @@ if [[ "${cplflx}" = ".true." ]]; then local ocn_petlist_bounds="${ATMPETS} $(( ATMPETS+OCNPETS-1 ))" local ocn_omp_num_threads="${OCNTHREADS}" local RUNTYPE="${cmeps_run_type}" - local CMEPS_RESTART_DIR="RESTART/" + local CMEPS_RESTART_DIR="CMEPS_RESTART/" local CPLMODE="${cplmode}" local coupling_interval_fast_sec="${CPL_FAST}" local RESTART_N="${restart_interval}" @@ -95,6 +89,8 @@ fi if [[ ! -r "${ufs_configure_template}" ]]; then echo "FATAL ERROR: template '${ufs_configure_template}' does not exist, ABORT!" exit 1 +else + echo "INFO: using ufs.configure template: '${ufs_configure_template}'" fi source "${HOMEgfs}/ush/atparse.bash" @@ -105,6 +101,6 @@ cat ufs.configure ${NCP} "${HOMEgfs}/sorc/ufs_model.fd/tests/parm/fd_ufs.yaml" fd_ufs.yaml -echo "SUB ${FUNCNAME[0]}: ufs.configure.sh ends for ${ufs_configure_template}" +echo "SUB ${FUNCNAME[0]}: ufs.configure ends" } diff --git a/ush/python/pygfs/task/oceanice_products.py b/ush/python/pygfs/task/oceanice_products.py new file mode 100644 index 0000000000..968acb0750 --- /dev/null +++ b/ush/python/pygfs/task/oceanice_products.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 + +import os +from logging import getLogger +from typing import List, Dict, Any +from pprint import pformat +import xarray as xr + +from wxflow import (AttrDict, + parse_j2yaml, + FileHandler, + Jinja, + logit, + Task, + add_to_datetime, to_timedelta, + WorkflowException, + Executable) + +logger = getLogger(__name__.split('.')[-1]) + + +class OceanIceProducts(Task): + """Ocean Ice Products Task + """ + + VALID_COMPONENTS = ['ocean', 'ice'] + COMPONENT_RES_MAP = {'ocean': 'OCNRES', 'ice': 'ICERES'} + VALID_PRODUCT_GRIDS = {'mx025': ['1p00', '0p25'], + 'mx050': ['1p00', '0p50'], + 'mx100': ['1p00'], + 'mx500': ['5p00']} + + # These could be read from the yaml file + TRIPOLE_DIMS_MAP = {'mx025': [1440, 1080], 'mx050': [720, 526], 'mx100': [360, 320], 'mx500': [72, 35]} + LATLON_DIMS_MAP = {'0p25': [1440, 721], '0p50': [720, 361], '1p00': [360, 181], '5p00': [72, 36]} + + @logit(logger, name="OceanIceProducts") + def __init__(self, config: Dict[str, Any]) -> None: + """Constructor for the Ocean/Ice Productstask + + Parameters + ---------- + config : Dict[str, Any] + Incoming configuration for the task from the environment + + Returns + ------- + None + """ + super().__init__(config) + + if self.config.COMPONENT not in self.VALID_COMPONENTS: + raise NotImplementedError(f'{self.config.COMPONENT} is not a valid model 
component.\n' + + 'Valid model components are:\n' + + f'{", ".join(self.VALID_COMPONENTS)}') + + model_grid = f"mx{self.config[self.COMPONENT_RES_MAP[self.config.COMPONENT]]:03d}" + + valid_datetime = add_to_datetime(self.runtime_config.current_cycle, to_timedelta(f"{self.config.FORECAST_HOUR}H")) + + # TODO: This is a bit of a hack, but it works for now + # FIXME: find a better way to provide the averaging period + # This will be different for ocean and ice, so when they are made flexible, this will need to be addressed + avg_period = f"{self.config.FORECAST_HOUR-self.config.FHOUT_GFS:03d}-{self.config.FORECAST_HOUR:03d}" + + localdict = AttrDict( + {'component': self.config.COMPONENT, + 'forecast_hour': self.config.FORECAST_HOUR, + 'valid_datetime': valid_datetime, + 'avg_period': avg_period, + 'model_grid': model_grid, + 'product_grids': self.VALID_PRODUCT_GRIDS[model_grid]} + ) + self.task_config = AttrDict(**self.config, **self.runtime_config, **localdict) + + # Read the oceanice_products.yaml file for common configuration + logger.info(f"Read the ocean ice products configuration yaml file {self.config.OCEANICEPRODUCTS_CONFIG}") + self.task_config.oceanice_yaml = parse_j2yaml(self.config.OCEANICEPRODUCTS_CONFIG, self.task_config) + logger.debug(f"oceanice_yaml:\n{pformat(self.task_config.oceanice_yaml)}") + + @staticmethod + @logit(logger) + def initialize(config: Dict) -> None: + """Initialize the work directory by copying all the common fix data + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + + Returns + ------- + None + """ + + # Copy static data to run directory + logger.info("Copy static data to run directory") + FileHandler(config.oceanice_yaml.ocnicepost.fix_data).sync() + + # Copy "component" specific model data to run directory (e.g. ocean/ice forecast output) + logger.info(f"Copy {config.component} data to run directory") + FileHandler(config.oceanice_yaml[config.component].data_in).sync() + + @staticmethod + @logit(logger) + def configure(config: Dict, product_grid: str) -> None: + """Configure the namelist for the product_grid in the work directory. + Create namelist 'ocnicepost.nml' from template + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + product_grid : str + Target product grid to process + + Returns + ------- + None + """ + + # Make a localconf with the "component" specific configuration for parsing the namelist + localconf = AttrDict() + localconf.DATA = config.DATA + localconf.component = config.component + + localconf.source_tripole_dims = ', '.join(map(str, OceanIceProducts.TRIPOLE_DIMS_MAP[config.model_grid])) + localconf.target_latlon_dims = ', '.join(map(str, OceanIceProducts.LATLON_DIMS_MAP[product_grid])) + + localconf.maskvar = config.oceanice_yaml[config.component].namelist.maskvar + localconf.sinvar = config.oceanice_yaml[config.component].namelist.sinvar + localconf.cosvar = config.oceanice_yaml[config.component].namelist.cosvar + localconf.angvar = config.oceanice_yaml[config.component].namelist.angvar + localconf.debug = ".true." if config.oceanice_yaml.ocnicepost.namelist.debug else ".false." 
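The localconf assembled above feeds the template rendering that follows. As a rough sketch of what wxflow's Jinja(nml_template, localconf).render does, assuming plain jinja2 and an invented miniature template (the real ocnicepost.nml.jinja2 ships with the ocnicepost fix data, and its group and key names may differ):

    import jinja2

    # tiny stand-in for ocnicepost.nml.jinja2; group and key names are invented
    template = jinja2.Template(
        "&ocnicepost_nml\n"
        "  debug = {{ debug }}\n"
        "  source_tripole_dims = {{ source_tripole_dims }}\n"
        "/\n"
    )
    # placeholder values standing in for the localconf entries built above
    print(template.render(debug=".false.", source_tripole_dims="1440, 1080"))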
+ + logger.debug(f"localconf:\n{pformat(localconf)}") + + # Configure the namelist and write to file + logger.info("Create namelist for ocnicepost.x") + nml_template = os.path.join(localconf.DATA, "ocnicepost.nml.jinja2") + nml_data = Jinja(nml_template, localconf).render + logger.debug(f"ocnicepost_nml:\n{nml_data}") + nml_file = os.path.join(localconf.DATA, "ocnicepost.nml") + with open(nml_file, "w") as fho: + fho.write(nml_data) + + @staticmethod + @logit(logger) + def execute(config: Dict, product_grid: str) -> None: + """Run the ocnicepost.x executable to interpolate and convert to grib2 + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + product_grid : str + Target product grid to process + + Returns + ------- + None + """ + + # Run the ocnicepost.x executable + OceanIceProducts.interp(config.DATA, config.APRUN_OCNICEPOST, exec_name="ocnicepost.x") + + # Convert interpolated netCDF file to grib2 + OceanIceProducts.netCDF_to_grib2(config, product_grid) + + @staticmethod + @logit(logger) + def interp(workdir: str, aprun_cmd: str, exec_name: str = "ocnicepost.x") -> None: + """ + Run the interpolation executable to generate rectilinear netCDF file + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + workdir : str + Working directory for the task + aprun_cmd : str + aprun command to use + exec_name : str + Name of the executable e.g. ocnicepost.x + + Returns + ------- + None + """ + os.chdir(workdir) + logger.debug(f"Current working directory: {os.getcwd()}") + + exec_cmd = Executable(aprun_cmd) + exec_cmd.add_default_arg(os.path.join(workdir, exec_name)) + + OceanIceProducts._call_executable(exec_cmd) + + @staticmethod + @logit(logger) + def netCDF_to_grib2(config: Dict, grid: str) -> None: + """Convert interpolated netCDF file to grib2 + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + grid : str + Target product grid to process + + Returns + ------ + None + """ + + os.chdir(config.DATA) + + exec_cmd = Executable(config.oceanice_yaml.nc2grib2.script) + arguments = [config.component, grid, config.current_cycle.strftime("%Y%m%d%H"), config.avg_period] + if config.component == 'ocean': + levs = config.oceanice_yaml.ocean.namelist.ocean_levels + arguments.append(':'.join(map(str, levs))) + + logger.info(f"Executing {exec_cmd} with arguments {arguments}") + try: + exec_cmd(*arguments) + except OSError: + logger.exception(f"FATAL ERROR: Failed to execute {exec_cmd}") + raise OSError(f"{exec_cmd}") + except Exception: + logger.exception(f"FATAL ERROR: Error occurred during execution of {exec_cmd}") + raise WorkflowException(f"{exec_cmd}") + + @staticmethod + @logit(logger) + def subset(config: Dict) -> None: + """ + Subset a list of variables from a netcdf file and save to a new netcdf file. 
+ Also save global attributes and history from the old netcdf file into new netcdf file + + Parameters + ---------- + config : Dict + Configuration dictionary for the task + + Returns + ------- + None + """ + + os.chdir(config.DATA) + + input_file = f"{config.component}.nc" + output_file = f"{config.component}_subset.nc" + varlist = config.oceanice_yaml[config.component].subset + + logger.info(f"Subsetting {varlist} from {input_file} to {output_file}") + + try: + # open the netcdf file + ds = xr.open_dataset(input_file) + + # subset the variables + ds_subset = ds[varlist] + + # save global attributes from the old netcdf file into new netcdf file + ds_subset.attrs = ds.attrs + + # save subsetted variables to a new netcdf file + ds_subset.to_netcdf(output_file) + + except FileNotFoundError: + logger.exception(f"FATAL ERROR: Input file not found: {input_file}") + raise FileNotFoundError(f"File not found: {input_file}") + + except IOError as err: + logger.exception(f"FATAL ERROR: IOError occurred during netCDF subset: {input_file}") + raise IOError(f"An I/O error occurred: {err}") + + except Exception as err: + logger.exception(f"FATAL ERROR: Error occurred during netCDF subset: {input_file}") + raise WorkflowException(f"{err}") + + finally: + # close the netcdf files + ds.close() + ds_subset.close() + + @staticmethod + @logit(logger) + def _call_executable(exec_cmd: Executable) -> None: + """Internal method to call executable + + Parameters + ---------- + exec_cmd : Executable + Executable to run + + Raises + ------ + OSError + Failure due to OS issues + WorkflowException + All other exceptions + """ + + logger.info(f"Executing {exec_cmd}") + try: + exec_cmd() + except OSError: + logger.exception(f"FATAL ERROR: Failed to execute {exec_cmd}") + raise OSError(f"{exec_cmd}") + except Exception: + logger.exception(f"FATAL ERROR: Error occurred during execution of {exec_cmd}") + raise WorkflowException(f"{exec_cmd}") + + @staticmethod + @logit(logger) + def finalize(config: Dict) -> None: + """Perform closing actions of the task. 
+ Copy data back from the DATA/ directory to COM/ + + Parameters + ---------- + config: Dict + Configuration dictionary for the task + + Returns + ------- + None + """ + + # Copy "component" specific generated data to COM/ directory + data_out = config.oceanice_yaml[config.component].data_out + + logger.info(f"Copy processed data to COM/ directory") + FileHandler(data_out).sync() diff --git a/versions/run.spack.ver b/versions/run.spack.ver index c1c13f58df..0812d3389e 100644 --- a/versions/run.spack.ver +++ b/versions/run.spack.ver @@ -21,6 +21,7 @@ export prod_util_ver=1.2.2 export py_netcdf4_ver=1.5.8 export py_pyyaml_ver=5.4.1 export py_jinja2_ver=3.1.2 +export py_xarray_ver=2022.3.0 export obsproc_run_ver=1.1.2 export prepobs_run_ver=1.0.1 diff --git a/workflow/applications/gefs.py b/workflow/applications/gefs.py index 1073397a08..0be4dc7124 100644 --- a/workflow/applications/gefs.py +++ b/workflow/applications/gefs.py @@ -24,6 +24,9 @@ def _get_app_configs(self): if self.do_wave_bnd: configs += ['wavepostbndpnt', 'wavepostbndpntbll'] + if self.do_ocean or self.do_ice: + configs += ['oceanice_products'] + return configs @staticmethod @@ -47,7 +50,13 @@ def get_task_names(self): if self.nens > 0: tasks += ['efcs'] - tasks += ['atmprod'] + tasks += ['atmos_prod'] + + if self.do_ocean: + tasks += ['ocean_prod'] + + if self.do_ice: + tasks += ['ice_prod'] if self.do_wave: tasks += ['wavepostsbs'] diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 6dd0342a78..040fc090cb 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -48,8 +48,8 @@ def _get_app_configs(self): if self.do_vrfy_oceanda: configs += ['ocnanalvrfy'] - if self.do_ocean: - configs += ['ocnpost'] + if self.do_ocean or self.do_ice: + configs += ['oceanice_products'] configs += ['sfcanl', 'analcalc', 'fcst', 'upp', 'atmos_products', 'arch', 'cleanup'] @@ -178,7 +178,7 @@ def get_task_names(self): if self.do_upp: gdas_tasks += ['atmupp'] - gdas_tasks += ['atmprod'] + gdas_tasks += ['atmos_prod'] if self.do_wave and 'gdas' in self.wave_cdumps: if self.do_wave_bnd: @@ -210,9 +210,15 @@ def get_task_names(self): gfs_tasks += ['atmanlupp', 'atmanlprod', 'fcst'] + if self.do_ocean: + gfs_tasks += ['ocean_prod'] + + if self.do_ice: + gfs_tasks += ['ice_prod'] + if self.do_upp: gfs_tasks += ['atmupp'] - gfs_tasks += ['atmprod'] + gfs_tasks += ['atmos_prod'] if self.do_goes: gfs_tasks += ['goesupp'] diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py index 1145863210..0a9648ee65 100644 --- a/workflow/applications/gfs_forecast_only.py +++ b/workflow/applications/gfs_forecast_only.py @@ -49,7 +49,7 @@ def _get_app_configs(self): configs += ['awips'] if self.do_ocean or self.do_ice: - configs += ['ocnpost'] + configs += ['oceanice_products'] if self.do_wave: configs += ['waveinit', 'waveprep', 'wavepostsbs', 'wavepostpnt'] @@ -100,7 +100,7 @@ def get_task_names(self): if self.do_upp: tasks += ['atmupp'] - tasks += ['atmprod'] + tasks += ['atmos_prod'] if self.do_goes: tasks += ['goesupp'] @@ -126,8 +126,11 @@ def get_task_names(self): if self.do_awips: tasks += ['awips_20km_1p0deg', 'awips_g2', 'fbwind'] - if self.do_ocean or self.do_ice: - tasks += ['ocnpost'] + if self.do_ocean: + tasks += ['ocean_prod'] + + if self.do_ice: + tasks += ['ice_prod'] if self.do_wave: if self.do_wave_bnd: diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py index 3eb249dc76..50b24f3578 100644 --- 
a/workflow/rocoto/gefs_tasks.py +++ b/workflow/rocoto/gefs_tasks.py @@ -148,7 +148,7 @@ def efcs(self): 'maxtries': '&MAXTRIES;' } - member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])} + member_var_dict = {'member': ' '.join([f"{mem:03d}" for mem in range(1, self.nmem + 1)])} metatask_dict = {'task_name': 'fcst_ens', 'var_dict': member_var_dict, 'task_dict': task_dict @@ -158,48 +158,76 @@ def efcs(self): return task - def atmprod(self): - atm_master_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_MASTER_TMPL"], {'MEMDIR': 'mem#member#'}) + def atmos_prod(self): + return self._atmosoceaniceprod('atmos') + + def ocean_prod(self): + return self._atmosoceaniceprod('ocean') + + def ice_prod(self): + return self._atmosoceaniceprod('ice') + + def _atmosoceaniceprod(self, component: str): + + products_dict = {'atmos': {'config': 'atmos_products', + 'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL', + 'history_file_tmpl': f'{self.cdump}.t@Hz.master.grb2f#fhr#'}, + 'ocean': {'config': 'oceanice_products', + 'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL', + 'history_file_tmpl': f'{self.cdump}.ocean.t@Hz.6hr_avg.f#fhr#.nc'}, + 'ice': {'config': 'oceanice_products', + 'history_path_tmpl': 'COM_ICE_HISTORY_TMPL', + 'history_file_tmpl': f'{self.cdump}.ice.t@Hz.6hr_avg.f#fhr#.nc'}} + + component_dict = products_dict[component] + config = component_dict['config'] + history_path_tmpl = component_dict['history_path_tmpl'] + history_file_tmpl = component_dict['history_file_tmpl'] + + resources = self.get_resource(config) + + history_path = self._template_to_rocoto_cycstring(self._base[history_path_tmpl], {'MEMDIR': 'mem#member#'}) deps = [] - data = f'{atm_master_path}/{self.cdump}.t@Hz.master.grb2f#fhr#' + data = f'{history_path}/{history_file_tmpl}' dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) - atm_prod_envars = self.envars.copy() + postenvars = self.envars.copy() postenvar_dict = {'ENSMEM': '#member#', 'MEMDIR': 'mem#member#', 'FHRLST': '#fhr#', - } + 'COMPONENT': component} for key, value in postenvar_dict.items(): - atm_prod_envars.append(rocoto.create_envar(name=key, value=str(value))) - - resources = self.get_resource('atmos_products') + postenvars.append(rocoto.create_envar(name=key, value=str(value))) - task_name = f'atm_prod_mem#member#_f#fhr#' + task_name = f'{component}_prod_mem#member#_f#fhr#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, - 'envars': atm_prod_envars, + 'envars': postenvars, 'cycledef': 'gefs', - 'command': f'{self.HOMEgfs}/jobs/rocoto/atmos_products.sh', + 'command': f'{self.HOMEgfs}/jobs/rocoto/{config}.sh', 'job_name': f'{self.pslot}_{task_name}_@H', 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', - 'maxtries': '&MAXTRIES;' - } + 'maxtries': '&MAXTRIES;'} + + fhrs = self._get_forecast_hours('gefs', self._configs[config]) + + # ocean/ice components do not have fhr 0 as they are averaged output + if component in ['ocean', 'ice']: + fhrs.remove(0) - fhr_var_dict = {'fhr': ' '.join([str(fhr).zfill(3) for fhr in - self._get_forecast_hours('gefs', self._configs['atmos_products'])])} - fhr_metatask_dict = {'task_name': 'atm_prod_#member#', + fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} + + fhr_metatask_dict = {'task_name': f'{component}_prod_#member#', 'task_dict': task_dict, - 'var_dict': fhr_var_dict - } + 'var_dict': fhr_var_dict} - member_var_dict = 
{'member': ' '.join([str(mem).zfill(3) for mem in range(0, self.nmem + 1)])} - member_metatask_dict = {'task_name': 'atm_prod', + member_var_dict = {'member': ' '.join([f"{mem:03d}" for mem in range(0, self.nmem + 1)])} + member_metatask_dict = {'task_name': f'{component}_prod', 'task_dict': fhr_metatask_dict, - 'var_dict': member_var_dict - } + 'var_dict': member_var_dict} task = rocoto.create_task(member_metatask_dict) diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 9102c74e35..83623f42d2 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -99,7 +99,7 @@ def prep(self): gfs_enkf = True if self.app_config.do_hybvar and 'gfs' in self.app_config.eupd_cdumps else False deps = [] - dep_dict = {'type': 'metatask', 'name': 'gdasatmprod', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} + dep_dict = {'type': 'metatask', 'name': 'gdasatmos_prod', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} deps.append(rocoto.add_dependency(dep_dict)) data = f'{atm_hist_path}/gdas.t@Hz.atmf009.nc' dep_dict = {'type': 'data', 'data': data, 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} @@ -583,7 +583,7 @@ def prepoceanobs(self): ocean_hist_path = self._template_to_rocoto_cycstring(self._base["COM_OCEAN_HISTORY_TMPL"], {'RUN': 'gdas'}) deps = [] - data = f'{ocean_hist_path}/gdas.t@Hz.ocnf009.nc' + data = f'{ocean_hist_path}/gdas.ocean.t@Hz.inst.f009.nc' dep_dict = {'type': 'data', 'data': data, 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -927,9 +927,21 @@ def atmanlprod(self): return task @staticmethod - def _get_ufs_postproc_grps(cdump, config): + def _get_ufs_postproc_grps(cdump, config, component='atmos'): - fhrs = Tasks._get_forecast_hours(cdump, config) + # Make a local copy of the config to avoid modifying the original + local_config = config.copy() + + # Ocean/Ice components do not have a HF output option like the atmosphere + if component in ['ocean', 'ice']: + local_config['FHMAX_HF_GFS'] = config['FHMAX_GFS'] + local_config['FHOUT_HF_GFS'] = config['FHOUT_GFS'] + + fhrs = Tasks._get_forecast_hours(cdump, local_config) + + # ocean/ice components do not have fhr 0 as they are averaged output + if component in ['ocean', 'ice']: + fhrs.remove(0) nfhrs_per_grp = config.get('NFHRS_PER_GROUP', 1) ngrps = len(fhrs) // nfhrs_per_grp if len(fhrs) % nfhrs_per_grp == 0 else len(fhrs) // nfhrs_per_grp + 1 @@ -1002,83 +1014,63 @@ def _upptask(self, upp_run="forecast", task_id="atmupp"): return task - def atmprod(self): + def atmos_prod(self): + return self._atmosoceaniceprod('atmos') - varname1, varname2, varname3 = 'grp', 'dep', 'lst' - varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['atmos_products']) - var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} + def ocean_prod(self): + return self._atmosoceaniceprod('ocean') - postenvars = self.envars.copy() - postenvar_dict = {'FHRLST': '#lst#'} - for key, value in postenvar_dict.items(): - postenvars.append(rocoto.create_envar(name=key, value=str(value))) + def ice_prod(self): + return self._atmosoceaniceprod('ice') - atm_master_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_MASTER_TMPL"]) - deps = [] - data = f'{atm_master_path}/{self.cdump}.t@Hz.master.grb2#dep#' - dep_dict = {'type': 'data', 'data': data, 'age': 120} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies 
= rocoto.create_dependency(dep=deps) - cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump - resources = self.get_resource('atmos_products') + def _atmosoceaniceprod(self, component: str): - task_name = f'{self.cdump}atmprod#{varname1}#' - task_dict = {'task_name': task_name, - 'resources': resources, - 'dependency': dependencies, - 'envars': postenvars, - 'cycledef': cycledef, - 'command': f'{self.HOMEgfs}/jobs/rocoto/atmos_products.sh', - 'job_name': f'{self.pslot}_{task_name}_@H', - 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', - 'maxtries': '&MAXTRIES;' - } + products_dict = {'atmos': {'config': 'atmos_products', + 'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL', + 'history_file_tmpl': f'{self.cdump}.t@Hz.master.grb2#dep#'}, + 'ocean': {'config': 'oceanice_products', + 'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL', + 'history_file_tmpl': f'{self.cdump}.ocean.t@Hz.6hr_avg.#dep#.nc'}, + 'ice': {'config': 'oceanice_products', + 'history_path_tmpl': 'COM_ICE_HISTORY_TMPL', + 'history_file_tmpl': f'{self.cdump}.ice.t@Hz.6hr_avg.#dep#.nc'}} - metatask_dict = {'task_name': f'{self.cdump}atmprod', - 'task_dict': task_dict, - 'var_dict': var_dict - } - - task = rocoto.create_task(metatask_dict) - - return task - - def ocnpost(self): + component_dict = products_dict[component] + config = component_dict['config'] + history_path_tmpl = component_dict['history_path_tmpl'] + history_file_tmpl = component_dict['history_file_tmpl'] varname1, varname2, varname3 = 'grp', 'dep', 'lst' - varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['ocnpost']) + varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs[config], component=component) var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} postenvars = self.envars.copy() - postenvar_dict = {'FHRLST': '#lst#', - 'ROTDIR': self.rotdir} + postenvar_dict = {'FHRLST': '#lst#', 'COMPONENT': component} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) + history_path = self._template_to_rocoto_cycstring(self._base[history_path_tmpl]) deps = [] - atm_hist_path = self._template_to_rocoto_cycstring(self._base["COM_ATMOS_HISTORY_TMPL"]) - data = f'{atm_hist_path}/{self.cdump}.t@Hz.atm.log#dep#.txt' - dep_dict = {'type': 'data', 'data': data} - deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst'} + data = f'{history_path}/{history_file_tmpl}' + dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) + dependencies = rocoto.create_dependency(dep=deps) cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump - resources = self.get_resource('ocnpost') + resources = self.get_resource(component_dict['config']) - task_name = f'{self.cdump}ocnpost#{varname1}#' + task_name = f'{self.cdump}{component}_prod#{varname1}#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, 'envars': postenvars, 'cycledef': cycledef, - 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnpost.sh', + 'command': f"{self.HOMEgfs}/jobs/rocoto/{config}.sh", 'job_name': f'{self.pslot}_{task_name}_@H', 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', 'maxtries': '&MAXTRIES;' } - metatask_dict = {'task_name': f'{self.cdump}ocnpost', + metatask_dict = {'task_name': f'{self.cdump}{component}_prod', 'task_dict': task_dict, 'var_dict': var_dict } @@ 
-1357,7 +1349,7 @@ def _get_awipsgroups(cdump, config): def awips_20km_1p0deg(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1398,7 +1390,7 @@ def awips_20km_1p0deg(self): def awips_g2(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1439,7 +1431,7 @@ def awips_g2(self): def gempak(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1462,7 +1454,7 @@ def gempak(self): def gempakmeta(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1485,7 +1477,7 @@ def gempakmeta(self): def gempakmetancdc(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1508,7 +1500,7 @@ def gempakmetancdc(self): def gempakncdcupapgif(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1647,7 +1639,7 @@ def vminmon(self): def tracker(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1670,7 +1662,7 @@ def tracker(self): def genesis(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1693,7 +1685,7 @@ def genesis(self): def genesis_fsu(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1716,7 +1708,7 @@ def genesis_fsu(self): def fit2obs(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1781,7 +1773,7 @@ def metp(self): def mos_stn_prep(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1804,7 +1796,7 @@ def mos_stn_prep(self): def mos_grd_prep(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 
'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1827,7 +1819,7 @@ def mos_grd_prep(self): def mos_ext_stn_prep(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1850,7 +1842,7 @@ def mos_ext_stn_prep(self): def mos_ext_grd_prep(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -2168,7 +2160,7 @@ def arch(self): dep_dict = {'type': 'task', 'name': f'{self.cdump}genesis_fsu'} deps.append(rocoto.add_dependency(dep_dict)) # Post job dependencies - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmprod'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}atmos_prod'} deps.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostsbs'} @@ -2179,8 +2171,12 @@ def arch(self): dep_dict = {'type': 'task', 'name': f'{self.cdump}wavepostbndpnt'} deps.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_ocean: - if self.app_config.mode in ['forecast-only']: # TODO: fix ocnpost to run in cycled mode - dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ocnpost'} + if self.cdump in ['gfs']: + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ocean_prod'} + deps.append(rocoto.add_dependency(dep_dict)) + if self.app_config.do_ice: + if self.cdump in ['gfs']: + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}ice_prod'} deps.append(rocoto.add_dependency(dep_dict)) # MOS job dependencies if self.cdump in ['gfs'] and self.app_config.do_mos: diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py index 110dc286b5..540f6ebe47 100644 --- a/workflow/rocoto/tasks.py +++ b/workflow/rocoto/tasks.py @@ -22,8 +22,8 @@ class Tasks: 'aeroanlinit', 'aeroanlrun', 'aeroanlfinal', 'preplandobs', 'landanl', 'fcst', - 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', 'goesupp', - 'ocnpost', + 'atmanlupp', 'atmanlprod', 'atmupp', 'goesupp', + 'atmosprod', 'oceanprod', 'iceprod', 'verfozn', 'verfrad', 'vminmon', 'metp', 'tracker', 'genesis', 'genesis_fsu', @@ -128,7 +128,7 @@ def _get_forecast_hours(cdump, config) -> List[str]: # Get a list of all forecast hours fhrs = [] if cdump in ['gdas']: - fhrs = range(fhmin, fhmax + fhout, fhout) + fhrs = list(range(fhmin, fhmax + fhout, fhout)) elif cdump in ['gfs', 'gefs']: fhmax = config['FHMAX_GFS'] fhout = config['FHOUT_GFS'] From 1aaef05d317cd1eec548ef2b9842679c531cef8b Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Tue, 13 Feb 2024 18:15:59 -0500 Subject: [PATCH 11/16] Jenkins Pipeline updates for Canceling Jobs (#2307) Tuning updates for Jenkins Pipeline : - Added short circuit for all parallel runs of cases on error of any - Fixed canceling of all scheduled jobs on first case error - Added feature to save error log files to Jenkins Archive facility on fail --- Jenkinsfile | 71 +++++++++++++++------------ ci/cases/pr/C48mx500_3DVarAOWCDA.yaml | 3 +- ci/scripts/run-check_ci.sh | 16 +++--- ci/scripts/utils/ci_utils.sh | 8 +++ 4 files changed, 59 insertions(+), 39 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9f3688ea6c..be62a20512 100644 --- a/Jenkinsfile +++ b/Jenkinsfile 
@@ -9,7 +9,7 @@ pipeline { options { skipDefaultCheckout() - buildDiscarder(logRotator(numToKeepStr: '2')) + parallelsAlwaysFailFast() } stages { // This initial stage is used to get the Machine name from the GitHub labels on the PR @@ -45,14 +45,14 @@ pipeline { properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in', 'Hera-EMC', 'Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])]) HOME = "${WORKSPACE}/TESTDIR" commonworkspace = "${WORKSPACE}" - sh(script: "mkdir -p ${HOME}/RUNTESTS", returnStatus: true) + sh(script: "mkdir -p ${HOME}/RUNTESTS") pullRequest.addLabel("CI-${Machine}-Building") if (pullRequest.labels.any { value -> value.matches("CI-${Machine}-Ready") }) { pullRequest.removeLabel("CI-${Machine}-Ready") + } } } } - } stage('Build System') { matrix { @@ -71,35 +71,41 @@ pipeline { steps { script { def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to build the system on per system basis under the common workspace HOME - sh(script: "mkdir -p ${HOMEgfs}", returnStatus: true) + sh(script: "mkdir -p ${HOMEgfs}") ws(HOMEgfs) { env.MACHINE_ID = machine // MACHINE_ID is used in the build scripts to determine the machine and is added to the shell environment if (fileExists("${HOMEgfs}/sorc/BUILT_semaphor")) { // if the system is already built, skip the build in the case of re-runs sh(script: "cat ${HOMEgfs}/sorc/BUILT_semaphor", returnStdout: true).trim() // TODO: and user configurable control to manage build semphore - ws(commonworkspace) { pullRequest.comment("Cloned PR already built (or build skipped) on ${machine} in directory ${HOMEgfs}") } + pullRequest.comment("Cloned PR already built (or build skipped) on ${machine} in directory ${HOMEgfs}
Still doing a checkout to get the latest changes") + sh(script: 'source workflow/gw_setup.sh; git pull --recurse-submodules') + dir('sorc') { + sh(script: './link_workflow.sh') + } } else { checkout scm - sh(script: 'source workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive', returnStatus: true) + sh(script: 'source workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive') def builds_file = readYaml file: 'ci/cases/yamls/build.yaml' def build_args_list = builds_file['builds'] def build_args = build_args_list[system].join(' ').trim().replaceAll('null', '') dir("${HOMEgfs}/sorc") { - sh(script: "${build_args}", returnStatus: true) - sh(script: './link_workflow.sh', returnStatus: true) - sh(script: "echo ${HOMEgfs} > BUILT_semaphor", returnStatus: true) + sh(script: "${build_args}") + sh(script: './link_workflow.sh') + sh(script: "echo ${HOMEgfs} > BUILT_semaphor") } } - if (pullRequest.labels.any { value -> value.matches("CI-${Machine}-Building") }) { - pullRequest.removeLabel("CI-${Machine}-Building") - } - pullRequest.addLabel("CI-${Machine}-Running") - } + if (env.CHANGE_ID && system == 'gfs') { + if (pullRequest.labels.any { value -> value.matches("CI-${Machine}-Building") }) { + pullRequest.removeLabel("CI-${Machine}-Building") + } + pullRequest.addLabel("CI-${Machine}-Running") + } + } + } } } } } } -} stage('Run Tests') { matrix { @@ -108,19 +114,19 @@ pipeline { axis { name 'Case' // TODO add dynamic list of cases from env vars (needs addtional plugins) - values 'C48_ATM', 'C48_S2SWA_gefs', 'C48_S2SW', 'C96_atm3DVar', 'C48mx500_3DVarAOWCDA', 'C96C48_hybatmDA', 'C96_atmsnowDA' + values 'C48_ATM', 'C48_S2SWA_gefs', 'C48_S2SW', 'C96_atm3DVar', 'C96C48_hybatmDA', 'C96_atmsnowDA' // 'C48mx500_3DVarAOWCDA' } } stages { stage('Create Experiment') { steps { script { - sh(script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp", returnStatus: true) + sh(script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp") def yaml_case = readYaml file: "${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp" system = yaml_case.experiment.system def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to populate the XML on per system basis env.RUNTESTS = "${HOME}/RUNTESTS" - sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStatus: true) + sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml") } } } @@ -130,16 +136,27 @@ pipeline { HOMEgfs = "${HOME}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments ws(HOMEgfs) { pslot = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true).trim() - pullRequest.comment("**Running** experiment: ${Case} on ${Machine}
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`") - try { - sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}", returnStatus: true) - } catch (Exception e) { + // pullRequest.comment("**Running** experiment: ${Case} on ${Machine}
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`") + err = sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}") + if (err != 0) { pullRequest.comment("**FAILURE** running experiment: ${Case} on ${Machine}") + sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cancel_all_batch_jobs ${HOME}/RUNTESTS") + ws(HOME) { + if (fileExists('RUNTESTS/error.logs')) { + def fileContent = readFile 'RUNTESTS/error.logs' + def lines = fileContent.readLines() + for (line in lines) { + echo "archiving: ${line}" + archiveArtifacts artifacts: "${line}", fingerprint: true + } + } + } error("Failed to run experiments ${Case} on ${Machine}") } - pullRequest.comment("**SUCCESS** running experiment: ${Case} on ${Machine}") + // pullRequest.comment("**SUCCESS** running experiment: ${Case} on ${Machine}") } } + } } } @@ -175,14 +192,6 @@ pipeline { def timestamp = new Date().format('MM dd HH:mm:ss', TimeZone.getTimeZone('America/New_York')) pullRequest.comment("**CI FAILED** ${Machine} at ${timestamp}
Built and ran in directory `${HOME}`") } - if (fileExists('${HOME}/RUNTESTS/ci.log')) { - def fileContent = readFile '${HOME}/RUNTESTS/ci.log' - fileContent.eachLine { line -> - if (line.contains('.log')) { - archiveArtifacts artifacts: "${line}", fingerprint: true - } - } - } } } } diff --git a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml index b972d3a445..6e9fc6d3de 100644 --- a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml +++ b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml @@ -17,6 +17,7 @@ arguments: start: warm yaml: {{ HOMEgfs }}/ci/cases/yamls/soca_gfs_defaults_ci.yaml -skip_ci_on_hosts: +skip_ci_on_host: - orion + - hera - hercules diff --git a/ci/scripts/run-check_ci.sh b/ci/scripts/run-check_ci.sh index f98f434462..8e1e927050 100755 --- a/ci/scripts/run-check_ci.sh +++ b/ci/scripts/run-check_ci.sh @@ -25,6 +25,7 @@ pslot=${2:-${pslot:-?}} # Name of the experiment being tested by this scr # TODO: Make this configurable (for now all scripts run from gfs for CI at runtime) HOMEgfs="${TEST_DIR}/gfs" RUNTESTS="${TEST_DIR}/RUNTESTS" +run_check_logfile="${RUNTESTS}/ci-run_check.log" # Source modules and setup logging echo "Source modules." @@ -77,15 +78,16 @@ while true; do { echo "Experiment ${pslot} Terminated with ${num_failed} tasks failed at $(date)" || true echo "Experiment ${pslot} Terminated: *FAILED*" - } >> "${RUNTESTS}/ci.log" - + } | tee -a "${run_check_logfile}" error_logs=$(rocotostat -d "${db}" -w "${xml}" | grep -E 'FAIL|DEAD' | awk '{print "-c", $1, "-t", $2}' | xargs rocotocheck -d "${db}" -w "${xml}" | grep join | awk '{print $2}') || true { echo "Error logs:" echo "${error_logs}" - } >> "${RUNTESTS}/ci.log" - sed -i "s/\`\`\`//2g" "${RUNTESTS}/ci.log" - sacct --format=jobid,jobname%35,WorkDir%100,stat | grep "${pslot}" | grep "${pr}\/RUNTESTS" | awk '{print $1}' | xargs scancel || true + } | tee -a "${run_check_logfile}" + # rm -f "${RUNTESTS}/error.logs" + for log in ${error_logs}; do + echo "RUNTESTS${log#*RUNTESTS}" >> "${RUNTESTS}/error.logs" + done rc=1 break fi @@ -95,8 +97,7 @@ while true; do echo "Experiment ${pslot} Completed at $(date)" || true echo "with ${num_succeeded} successfully completed jobs" || true echo "Experiment ${pslot} Completed: *SUCCESS*" - } >> "${RUNTESTS}/ci.log" - sed -i "s/\`\`\`//2g" "${RUNTESTS}/ci.log" + } | tee -a "${run_check_logfile}" rc=0 break fi @@ -107,3 +108,4 @@ while true; do done exit "${rc}" + diff --git a/ci/scripts/utils/ci_utils.sh b/ci/scripts/utils/ci_utils.sh index 6f2426c388..ce2e039307 100755 --- a/ci/scripts/utils/ci_utils.sh +++ b/ci/scripts/utils/ci_utils.sh @@ -102,6 +102,14 @@ function get_pslot () { } +function cancel_all_batch_jobs () { + local RUNTESTS="${1}" + pslot_list=$(get_pslot_list "${RUNTESTS}") + for pslot in ${pslot_list}; do + cancel_batch_jobs "${pslot}" + done +} + function create_experiment () { local yaml_config="${1}" From 638684e0bfcd06700cc8695f09824891a0a1eee1 Mon Sep 17 00:00:00 2001 From: Kate Friedman Date: Wed, 14 Feb 2024 14:55:21 -0500 Subject: [PATCH 12/16] Remove `finddate.sh` from system (#2308) * Retire finddate.sh usage from system * Update gfs-utils hash to 7a84c88 - New hash includes removal of finddate.sh Refs #2279 --- sorc/gfs_utils.fd | 2 +- sorc/link_workflow.sh | 2 +- ush/syndat_getjtbul.sh | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/sorc/gfs_utils.fd b/sorc/gfs_utils.fd index 6ddd1460d9..7a84c884d2 160000 --- a/sorc/gfs_utils.fd +++ b/sorc/gfs_utils.fd @@ -1 +1 @@ -Subproject commit 6ddd1460d9f7c292f04573ab2bdc988a05ed618b 
+Subproject commit 7a84c884d210960bd9f59e6ae09c2e7f0e1e39b1 diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 39e2a7785f..21bc30faa4 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -155,7 +155,7 @@ cd "${HOMEgfs}/ush" || exit 8 for file in emcsfc_ice_blend.sh global_cycle_driver.sh emcsfc_snow.sh global_cycle.sh; do ${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_utils.fd/ush/${file}" . done -for file in finddate.sh make_ntc_bull.pl make_NTC_file.pl make_tif.sh month_name.sh ; do +for file in make_ntc_bull.pl make_NTC_file.pl make_tif.sh month_name.sh ; do ${LINK_OR_COPY} "${HOMEgfs}/sorc/gfs_utils.fd/ush/${file}" . done diff --git a/ush/syndat_getjtbul.sh b/ush/syndat_getjtbul.sh index c17067ff72..a68187868a 100755 --- a/ush/syndat_getjtbul.sh +++ b/ush/syndat_getjtbul.sh @@ -52,8 +52,6 @@ hour=$(echo $CDATE10 | cut -c9-10) echo $PDYm1 pdym1=$PDYm1 -#pdym1=$(sh $utilscript/finddate.sh $pdy d-1) - echo " " >> $pgmout echo "Entering sub-shell syndat_getjtbul.sh to recover JTWC Bulletins" \ >> $pgmout From d465ea06e8b2a8f3a5eb1120647c1e2ce5197d66 Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Thu, 15 Feb 2024 19:25:02 +0000 Subject: [PATCH 13/16] Set HOMEgfs for module_setup in CI driver (#2321) Hotfixes to CI Bash system from updates with sourcing `detect_machine.sh` in `ush/module-setup.sh` using **HOMEgfs**. --- ci/scripts/check_ci.sh | 23 ++++++++++++----------- ci/scripts/driver.sh | 3 +++ ci/scripts/run_ci.sh | 13 +++++++------ 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/ci/scripts/check_ci.sh b/ci/scripts/check_ci.sh index cda2d4e9f2..4ff7eefd26 100755 --- a/ci/scripts/check_ci.sh +++ b/ci/scripts/check_ci.sh @@ -8,7 +8,7 @@ set -eux # to run from within a cron job in the CI Managers account ##################################################################################### -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." >/dev/null 2>&1 && pwd )" +HOMEgfs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." >/dev/null 2>&1 && pwd )" scriptname=$(basename "${BASH_SOURCE[0]}") echo "Begin ${scriptname} at $(date -u)" || true export PS4='+ $(basename ${BASH_SOURCE})[${LINENO}]' @@ -20,11 +20,11 @@ REPO_URL="https://github.com/NOAA-EMC/global-workflow.git" # Set up runtime environment varibles for accounts on supproted machines ######################################################################### -source "${ROOT_DIR}/ush/detect_machine.sh" +source "${HOMEgfs}/ush/detect_machine.sh" case ${MACHINE_ID} in hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" - source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" + source "${HOMEgfs}/ci/platforms/config.${MACHINE_ID}" ;; *) echo "Unsupported platform. Exiting with error." 
@@ -32,9 +32,10 @@ case ${MACHINE_ID} in ;; esac set +x -source "${ROOT_DIR}/ush/module-setup.sh" -source "${ROOT_DIR}/ci/scripts/utils/ci_utils.sh" -module use "${ROOT_DIR}/modulefiles" +export HOMEgfs +source "${HOMEgfs}/ush/module-setup.sh" +source "${HOMEgfs}/ci/scripts/utils/ci_utils.sh" +module use "${HOMEgfs}/modulefiles" module load "module_gwsetup.${MACHINE_ID}" module list set -x @@ -57,7 +58,7 @@ pr_list_dbfile="${GFS_CI_ROOT}/open_pr_list.db" pr_list="" if [[ -f "${pr_list_dbfile}" ]]; then - pr_list=$("${ROOT_DIR}/ci/scripts/pr_list_database.py" --dbfile "${pr_list_dbfile}" --display | grep -v Failed | grep Running | awk '{print $1}') || true + pr_list=$("${HOMEgfs}/ci/scripts/pr_list_database.py" --dbfile "${pr_list_dbfile}" --display | grep -v Failed | grep Running | awk '{print $1}') || true fi if [[ -z "${pr_list+x}" ]]; then echo "no PRs open and ready to run cases on .. exiting" @@ -89,13 +90,13 @@ for pr in ${pr_list}; do sed -i "1 i\`\`\`" "${output_ci}" sed -i "1 i\All CI Test Cases Passed on ${MACHINE_ID^}:" "${output_ci}" "${GH}" pr comment "${pr}" --repo "${REPO_URL}" --body-file "${output_ci}" - "${ROOT_DIR}/ci/scripts/pr_list_database.py" --remove_pr "${pr}" --dbfile "${pr_list_dbfile}" + "${HOMEgfs}/ci/scripts/pr_list_database.py" --remove_pr "${pr}" --dbfile "${pr_list_dbfile}" # Check to see if this PR that was opened by the weekly tests and if so close it if it passed on all platforms weekly_labels=$(${GH} pr view "${pr}" --repo "${REPO_URL}" --json headRefName,labels,author --jq 'select(.author.login | contains("emcbot")) | select(.headRefName | contains("weekly_ci")) | .labels[].name ') || true if [[ -n "${weekly_labels}" ]]; then - num_platforms=$(find "${ROOT_DIR}/ci/platforms" -type f -name "config.*" | wc -l) + num_platforms=$(find "${HOMEgfs}/ci/platforms" -type f -name "config.*" | wc -l) passed=0 - for platforms in "${ROOT_DIR}"/ci/platforms/config.*; do + for platforms in "${HOMEgfs}"/ci/platforms/config.*; do machine=$(basename "${platforms}" | cut -d. 
-f2) if [[ "${weekly_labels}" == *"CI-${machine^}-Passed"* ]]; then ((passed=passed+1)) @@ -139,7 +140,7 @@ for pr in ${pr_list}; do } >> "${output_ci}" sed -i "1 i\`\`\`" "${output_ci}" "${GH}" pr comment "${pr}" --repo "${REPO_URL}" --body-file "${output_ci}" - "${ROOT_DIR}/ci/scripts/pr_list_database.py" --remove_pr "${pr}" --dbfile "${pr_list_dbfile}" + "${HOMEgfs}/ci/scripts/pr_list_database.py" --remove_pr "${pr}" --dbfile "${pr_list_dbfile}" for kill_cases in "${pr_dir}/RUNTESTS/"*; do pslot=$(basename "${kill_cases}") cancel_slurm_jobs "${pslot}" diff --git a/ci/scripts/driver.sh b/ci/scripts/driver.sh index 5fc13ea524..f37b5e3f2e 100755 --- a/ci/scripts/driver.sh +++ b/ci/scripts/driver.sh @@ -47,12 +47,15 @@ esac ###################################################### # setup runtime env for correct python install and git ###################################################### +HOMEgfs=${ROOT_DIR} +export HOMEgfs set +x source "${ROOT_DIR}/ci/scripts/utils/ci_utils.sh" source "${ROOT_DIR}/ush/module-setup.sh" module use "${ROOT_DIR}/modulefiles" module load "module_gwsetup.${MACHINE_ID}" set -x +unset HOMEgfs ############################################################ # query repo and get list of open PRs with tags {machine}-CI diff --git a/ci/scripts/run_ci.sh b/ci/scripts/run_ci.sh index 4a390a23f2..f50a4465d0 100755 --- a/ci/scripts/run_ci.sh +++ b/ci/scripts/run_ci.sh @@ -9,7 +9,7 @@ set -eux # Abstract TODO ##################################################################################### -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." >/dev/null 2>&1 && pwd )" +HOMEgfs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." >/dev/null 2>&1 && pwd )" scriptname=$(basename "${BASH_SOURCE[0]}") echo "Begin ${scriptname} at $(date -u)" || true export PS4='+ $(basename ${BASH_SOURCE})[${LINENO}]' @@ -18,11 +18,11 @@ export PS4='+ $(basename ${BASH_SOURCE})[${LINENO}]' # Set up runtime environment varibles for accounts on supproted machines ######################################################################### -source "${ROOT_DIR}/ush/detect_machine.sh" +source "${HOMEgfs}/ush/detect_machine.sh" case ${MACHINE_ID} in hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" - source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" + source "${HOMEgfs}/ci/platforms/config.${MACHINE_ID}" ;; *) echo "Unsupported platform. Exiting with error." @@ -30,8 +30,9 @@ case ${MACHINE_ID} in ;; esac set +x -source "${ROOT_DIR}/ush/module-setup.sh" -module use "${ROOT_DIR}/modulefiles" +export HOMEgfs +source "${HOMEgfs}/ush/module-setup.sh" +module use "${HOMEgfs}/modulefiles" module load "module_gwsetup.${MACHINE_ID}" module list set -eux @@ -47,7 +48,7 @@ pr_list_dbfile="${GFS_CI_ROOT}/open_pr_list.db" pr_list="" if [[ -f "${pr_list_dbfile}" ]]; then - pr_list=$("${ROOT_DIR}/ci/scripts/pr_list_database.py" --display --dbfile "${pr_list_dbfile}" | grep -v Failed | grep Open | grep Running | awk '{print $1}' | head -"${max_concurrent_pr}") || true + pr_list=$("${HOMEgfs}/ci/scripts/pr_list_database.py" --display --dbfile "${pr_list_dbfile}" | grep -v Failed | grep Open | grep Running | awk '{print $1}' | head -"${max_concurrent_pr}") || true fi if [[ -z "${pr_list}" ]]; then echo "no open and built PRs that are ready for the cases to advance with rocotorun .. 
exiting" From 094e3b86da44f1d3fc1d99f68f6fdfcd36deb09f Mon Sep 17 00:00:00 2001 From: Cory Martin Date: Thu, 15 Feb 2024 14:43:55 -0500 Subject: [PATCH 14/16] Move IMS remapping files from COM_OBS to FIXgdas (#2322) * Add in IMS obs fix directory and update submodule for gdas --- parm/config/gfs/config.base.emc.dyn | 1 + sorc/gdas.cd | 2 +- sorc/link_workflow.sh | 2 +- ush/python/pygfs/task/land_analysis.py | 2 +- versions/fix.ver | 1 + 5 files changed, 5 insertions(+), 3 deletions(-) diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 1f6568c3ee..48b66ab1b1 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -39,6 +39,7 @@ export FIXcice="${FIXgfs}/cice" export FIXmom="${FIXgfs}/mom6" export FIXreg2grb2="${FIXgfs}/reg2grb2" export FIXugwd="${FIXgfs}/ugwd" +export FIXgdas="${FIXgfs}/gdas" ######################################################################## diff --git a/sorc/gdas.cd b/sorc/gdas.cd index 831b08a3f9..b37680be78 160000 --- a/sorc/gdas.cd +++ b/sorc/gdas.cd @@ -1 +1 @@ -Subproject commit 831b08a3f947e8d743e2afbd6d38ecc4b0dec3b1 +Subproject commit b37680be78560343b940352b9b2c6727190a642c diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 21bc30faa4..3e0ff4db64 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -190,7 +190,7 @@ if [[ -d "${HOMEgfs}/sorc/gdas.cd" ]]; then cd "${HOMEgfs}/fix" || exit 1 [[ ! -d gdas ]] && mkdir -p gdas cd gdas || exit 1 - for gdas_sub in fv3jedi gsibec; do + for gdas_sub in fv3jedi gsibec obs; do if [[ -d "${gdas_sub}" ]]; then rm -rf "${gdas_sub}" fi diff --git a/ush/python/pygfs/task/land_analysis.py b/ush/python/pygfs/task/land_analysis.py index 821caf2305..90ab997de1 100644 --- a/ush/python/pygfs/task/land_analysis.py +++ b/ush/python/pygfs/task/land_analysis.py @@ -153,7 +153,7 @@ def prepare_IMS(self) -> None: # create a temporary dict of all keys needed in this method localconf = AttrDict() keys = ['DATA', 'current_cycle', 'COM_OBS', 'COM_ATMOS_RESTART_PREV', - 'OPREFIX', 'CASE', 'OCNRES', 'ntiles'] + 'OPREFIX', 'CASE', 'OCNRES', 'ntiles', 'FIXgdas'] for key in keys: localconf[key] = self.task_config[key] diff --git a/versions/fix.ver b/versions/fix.ver index 13d9b56dd2..f230188dbb 100644 --- a/versions/fix.ver +++ b/versions/fix.ver @@ -10,6 +10,7 @@ export datm_ver=20220805 export gdas_crtm_ver=20220805 export gdas_fv3jedi_ver=20220805 export gdas_gsibec_ver=20221031 +export gdas_obs_ver=20240213 export glwu_ver=20220805 export gsi_ver=20230911 export lut_ver=20220805 From cf83885548bb3a6740c033f42479ce2ad283a4a9 Mon Sep 17 00:00:00 2001 From: Jessica Meixner Date: Fri, 16 Feb 2024 01:55:02 -0500 Subject: [PATCH 15/16] Add unstructured grid for HR3/GFS (#2230) This adds the capability to use unstructured grids in the global workflow, which will be used in HR3. There are new fix files for a low-resolution 100km grid and a grid closer to our targeted GFSv17 grid which has the resolution combined from the older multi_1 and GFSv16 grids. The fix file update is here: NOAA-EMC/global-workflow#2229 Note: This now means that GFS tests need a new build option: `./build_all.sh -w` So that PDLIB=ON is turned on for compiling relevant UFS and WW3 codes. 
Resolves NOAA-EMC/global-workflow#1547 --- ci/cases/pr/C48_S2SWA_gefs.yaml | 5 ++++ ci/cases/pr/C48mx500_3DVarAOWCDA.yaml | 2 +- ci/cases/yamls/build.yaml | 2 +- ci/scripts/clone-build_ci.sh | 2 +- docs/source/clone.rst | 7 ++++++ env/WCOSS2.env | 2 ++ parm/config/gfs/config.base.emc.dyn | 8 +++---- parm/config/gfs/config.resources | 4 ++-- parm/config/gfs/config.stage_ic | 16 ++++++------- parm/config/gfs/config.ufs | 10 +++++++- parm/config/gfs/config.wave | 14 +++++++++++- parm/wave/at_10m_interp.inp.tmpl | 2 +- parm/wave/ep_10m_interp.inp.tmpl | 2 +- parm/wave/glo_15mxt_interp.inp.tmpl | 6 ++--- parm/wave/glo_200_interp.inp.tmpl | 12 ++++++++++ parm/wave/glo_30m_interp.inp.tmpl | 6 ++--- parm/wave/wc_10m_interp.inp.tmpl | 2 +- scripts/exgfs_wave_init.sh | 13 ++++++++--- sorc/build_all.sh | 14 ++++++++---- sorc/build_ufs.sh | 4 +++- sorc/build_ww3prepost.sh | 33 +++++++++++++++++++-------- ush/wave_grid_moddef.sh | 16 +++++++++---- versions/fix.ver | 2 +- 23 files changed, 130 insertions(+), 54 deletions(-) create mode 100755 parm/wave/glo_200_interp.inp.tmpl diff --git a/ci/cases/pr/C48_S2SWA_gefs.yaml b/ci/cases/pr/C48_S2SWA_gefs.yaml index d42f4cd15b..de677a70b6 100644 --- a/ci/cases/pr/C48_S2SWA_gefs.yaml +++ b/ci/cases/pr/C48_S2SWA_gefs.yaml @@ -16,3 +16,8 @@ arguments: idate: 2021032312 edate: 2021032312 yaml: {{ HOMEgfs }}/ci/cases/yamls/gefs_ci_defaults.yaml + +skip_ci_on_hosts: + - hera + - orion + - hercules diff --git a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml index 6e9fc6d3de..d9156e38f3 100644 --- a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml +++ b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml @@ -17,7 +17,7 @@ arguments: start: warm yaml: {{ HOMEgfs }}/ci/cases/yamls/soca_gfs_defaults_ci.yaml -skip_ci_on_host: +skip_ci_on_hosts: - orion - hera - hercules diff --git a/ci/cases/yamls/build.yaml b/ci/cases/yamls/build.yaml index 5398fa1889..2ff008d372 100644 --- a/ci/cases/yamls/build.yaml +++ b/ci/cases/yamls/build.yaml @@ -1,3 +1,3 @@ builds: - gefs: './build_all.sh' - - gfs: './build_all.sh -gu' \ No newline at end of file + - gfs: './build_all.sh -wgu' diff --git a/ci/scripts/clone-build_ci.sh b/ci/scripts/clone-build_ci.sh index 798c98bf50..989afabb80 100755 --- a/ci/scripts/clone-build_ci.sh +++ b/ci/scripts/clone-build_ci.sh @@ -74,7 +74,7 @@ set +e source "${HOMEgfs}/ush/module-setup.sh" export BUILD_JOBS=8 rm -rf log.build -./build_all.sh -gu >> log.build 2>&1 +./build_all.sh -guw >> log.build 2>&1 build_status=$? DATE=$(date +'%D %r') diff --git a/docs/source/clone.rst b/docs/source/clone.rst index bad3f0e9f6..4f47eb230f 100644 --- a/docs/source/clone.rst +++ b/docs/source/clone.rst @@ -39,6 +39,13 @@ For coupled cycling (include new UFSDA) use the `-gu` options during build: ./build_all.sh -gu +For building with PDLIB for the wave model, use the `-w` options during build: + +:: + + ./build_all.sh -w + + Build workflow components and link workflow artifacts such as executables, etc. 
:: diff --git a/env/WCOSS2.env b/env/WCOSS2.env index bbf4de2ae3..e247a37bc9 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -176,6 +176,8 @@ elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then if [[ "${step}" = "fcst" ]]; then export OMP_PLACES=cores export OMP_STACKSIZE=2048M + export MPICH_MPIIO_HINTS="*:romio_cb_write=disable" + export FI_OFI_RXM_SAR_LIMIT=3145728 elif [[ "${step}" = "efcs" ]]; then export MPICH_MPIIO_HINTS="*:romio_cb_write=disable" export FI_OFI_RXM_SAR_LIMIT=3145728 diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 48b66ab1b1..4301679fbb 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -183,16 +183,16 @@ export ICERES="${OCNRES}" # These are the currently recommended grid-combinations case "${CASE}" in "C48") - export waveGRD='glo_500' + export waveGRD='uglo_100km' ;; "C96" | "C192") - export waveGRD='glo_200' + export waveGRD='uglo_100km' ;; "C384") - export waveGRD='glo_025' + export waveGRD='uglo_100km' ;; "C768" | "C1152") - export waveGRD='mx025' + export waveGRD='uglo_m1g16' ;; *) echo "FATAL ERROR: Unrecognized CASE ${CASE}, ABORT!" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index ced6e6a3d8..afebc70ae2 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -737,7 +737,7 @@ case ${step} in export npe_genesis=1 export nth_genesis=1 export npe_node_genesis=1 - export memory_genesis="4G" + export memory_genesis="10G" ;; "genesis_fsu") @@ -745,7 +745,7 @@ case ${step} in export npe_genesis_fsu=1 export nth_genesis_fsu=1 export npe_node_genesis_fsu=1 - export memory_genesis_fsu="4G" + export memory_genesis_fsu="10G" ;; "fit2obs") diff --git a/parm/config/gfs/config.stage_ic b/parm/config/gfs/config.stage_ic index 7f3956af4d..dc224b72db 100644 --- a/parm/config/gfs/config.stage_ic +++ b/parm/config/gfs/config.stage_ic @@ -21,16 +21,16 @@ case "${CASE}" in export CPL_WAVIC=workflow_C384_refactored ;; "C768") - export CPL_ATMIC=HR2_refactored - export CPL_ICEIC=HR1_refactored - export CPL_OCNIC=HR1_refactored - export CPL_WAVIC=HR1_refactored + export CPL_ATMIC=HR3C768 + export CPL_ICEIC=HR3marine + export CPL_OCNIC=HR3marine + export CPL_WAVIC=HR3marine ;; "C1152") - export CPL_ATMIC=HR2_C1152_refactored - export CPL_ICEIC=HR3_refactored - export CPL_OCNIC=HR3_refactored - export CPL_WAVIC=HR1_refactored + export CPL_ATMIC=HR3C1152 + export CPL_ICEIC=HR3marine + export CPL_OCNIC=HR3marine + export CPL_WAVIC=HR3marine ;; *) echo "FATAL ERROR Unrecognized resolution: ${CASE}" diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index c8ce216899..2e299fddf7 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -15,7 +15,7 @@ if (( $# <= 1 )); then echo "--fv3 C48|C96|C192|C384|C768|C1152|C3072" echo "--mom6 500|100|025" echo "--cice6 500|100|025" - echo "--ww3 gnh_10m;aoc_9km;gsh_15m|gwes_30m|glo_025|glo_200|glo_500|mx025" + echo "--ww3 gnh_10m;aoc_9km;gsh_15m|gwes_30m|glo_025|glo_200|glo_500|mx025|uglo_100km|uglo_m1g16" echo "--gocart" exit 1 @@ -416,6 +416,14 @@ if [[ "${skip_ww3}" == "false" ]]; then "mx025") ntasks_ww3=80 ;; + "uglo_100km") + ntasks_ww3=40 + nthreads_ww3=1 + ;; + "uglo_m1g16") + ntasks_ww3=1000 + nthreads_ww3=1 + ;; *) echo "FATAL ERROR: Unsupported WW3 resolution = ${ww3_res}, ABORT!" 
exit 1 diff --git a/parm/config/gfs/config.wave b/parm/config/gfs/config.wave index acb4c518ba..74f5b78937 100644 --- a/parm/config/gfs/config.wave +++ b/parm/config/gfs/config.wave @@ -80,7 +80,19 @@ case "${waveGRD}" in export wavepostGRD='glo_500' export waveuoutpGRD=${waveGRD} ;; - *) + "uglo_100km") + #unstructured 100km grid + export waveinterpGRD='glo_200' + export wavepostGRD='' + export waveuoutpGRD=${waveGRD} + ;; + "uglo_m1g16") + #unstructured m1v16 grid + export waveinterpGRD='glo_15mxt' + export wavepostGRD='' + export waveuoutpGRD=${waveGRD} + ;; + *) echo "FATAL ERROR: No grid specific wave config values exist for ${waveGRD}. Aborting." exit 1 ;; diff --git a/parm/wave/at_10m_interp.inp.tmpl b/parm/wave/at_10m_interp.inp.tmpl index b2a80081e1..6f4c1f7099 100755 --- a/parm/wave/at_10m_interp.inp.tmpl +++ b/parm/wave/at_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'gnh_10m' + 'uglo_m1g16' 'at_10m' $ 0 diff --git a/parm/wave/ep_10m_interp.inp.tmpl b/parm/wave/ep_10m_interp.inp.tmpl index 0848854ccf..23cfd50c2e 100755 --- a/parm/wave/ep_10m_interp.inp.tmpl +++ b/parm/wave/ep_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'gnh_10m' + 'uglo_m1g16' 'ep_10m' $ 0 diff --git a/parm/wave/glo_15mxt_interp.inp.tmpl b/parm/wave/glo_15mxt_interp.inp.tmpl index 74bc9eebf4..19e9dae684 100755 --- a/parm/wave/glo_15mxt_interp.inp.tmpl +++ b/parm/wave/glo_15mxt_interp.inp.tmpl @@ -3,11 +3,9 @@ $------------------------------------------------ $ Start Time DT NSteps TIME DT NSTEPS $ Total number of grids - 4 + 2 $ Grid extensions - 'gnh_10m' - 'aoc_9km' - 'gsh_15m' + 'uglo_m1g16' 'glo_15mxt' $ 0 diff --git a/parm/wave/glo_200_interp.inp.tmpl b/parm/wave/glo_200_interp.inp.tmpl new file mode 100755 index 0000000000..c238a6fe0b --- /dev/null +++ b/parm/wave/glo_200_interp.inp.tmpl @@ -0,0 +1,12 @@ +$ Input file for interpolation of GLO30m_ext Grid +$------------------------------------------------ +$ Start Time DT NSteps + TIME DT NSTEPS +$ Total number of grids + 2 +$ Grid extensions + 'uglo_100km' + 'glo_200' +$ + 0 +$ diff --git a/parm/wave/glo_30m_interp.inp.tmpl b/parm/wave/glo_30m_interp.inp.tmpl index ea1baf7fc4..c62881202c 100755 --- a/parm/wave/glo_30m_interp.inp.tmpl +++ b/parm/wave/glo_30m_interp.inp.tmpl @@ -3,11 +3,9 @@ $------------------------------------------------ $ Start Time DT NSteps TIME DT NSTEPS $ Total number of grids - 4 + 2 $ Grid extensions - 'gnh_10m' - 'aoc_9km' - 'gsh_15m' + 'uglo_m1g16' 'glo_30m' $ 0 diff --git a/parm/wave/wc_10m_interp.inp.tmpl b/parm/wave/wc_10m_interp.inp.tmpl index abb51b4dfc..8338c91d0c 100755 --- a/parm/wave/wc_10m_interp.inp.tmpl +++ b/parm/wave/wc_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'gnh_10m' + 'uglo_m1g16' 'wc_10m' $ 0 diff --git a/scripts/exgfs_wave_init.sh b/scripts/exgfs_wave_init.sh index ce903a2284..03fd93bf48 100755 --- a/scripts/exgfs_wave_init.sh +++ b/scripts/exgfs_wave_init.sh @@ -118,6 +118,13 @@ source "${HOMEgfs}/ush/preamble.sh" err=2;export err;${errchk} fi + + if [ -f ${FIXwave}/${grdID}.msh ] + then + cp "${FIXwave}/${grdID}.msh" "${grdID}.msh" + fi + #TO DO: how do we say "it's unstructured, and therefore need to have error check here" + [[ ! 
-d "${COM_WAVE_PREP}" ]] && mkdir -m 775 -p "${COM_WAVE_PREP}" if [ ${CFP_MP:-"NO"} = "YES" ]; then echo "$nmoddef $USHwave/wave_grid_moddef.sh $grdID > $grdID.out 2>&1" >> cmdfile @@ -166,7 +173,7 @@ source "${HOMEgfs}/ush/preamble.sh" exit=$? fi - if [ "$exit" != '0' ] + if [[ "$exit" != '0' ]] then set +x echo ' ' @@ -195,9 +202,9 @@ source "${HOMEgfs}/ush/preamble.sh" echo '********************************************** ' echo '*** FATAL ERROR : NO MODEL DEFINITION FILE *** ' echo '********************************************** ' - echo " grdID = $grdID" + echo " grdID = ${grdID}" echo ' ' - sed "s/^/$grdID.out : /g" $grdID.out + sed "s/^/${grdID}.out : /g" "${grdID}.out" set_trace err=3;export err;${errchk} fi diff --git a/sorc/build_all.sh b/sorc/build_all.sh index c337374428..dd74c53487 100755 --- a/sorc/build_all.sh +++ b/sorc/build_all.sh @@ -16,7 +16,7 @@ function _usage() { Builds all of the global-workflow components by calling the individual build scripts in sequence. -Usage: ${BASH_SOURCE[0]} [-a UFS_app][-c build_config][-h][-j n][-v] +Usage: ${BASH_SOURCE[0]} [-a UFS_app][-c build_config][-h][-j n][-v][-w] -a UFS_app: Build a specific UFS app instead of the default -g: @@ -29,6 +29,8 @@ Usage: ${BASH_SOURCE[0]} [-a UFS_app][-c build_config][-h][-j n][-v] Build UFS-DA -v: Execute all build scripts with -v option to turn on verbose where supported + -w: + Use unstructured wave grid EOF exit 1 } @@ -40,10 +42,11 @@ _build_ufs_opt="" _build_ufsda="NO" _build_gsi="NO" _verbose_opt="" +_wave_unst="" _build_job_max=20 # Reset option counter in case this script is sourced OPTIND=1 -while getopts ":a:ghj:uv" option; do +while getopts ":a:ghj:uvw" option; do case "${option}" in a) _build_ufs_opt+="-a ${OPTARG} ";; g) _build_gsi="YES" ;; @@ -51,6 +54,7 @@ while getopts ":a:ghj:uv" option; do j) _build_job_max="${OPTARG} ";; u) _build_ufsda="YES" ;; v) _verbose_opt="-v";; + w) _wave_unst="-w";; :) echo "[${BASH_SOURCE[0]}]: ${option} requires an argument" _usage @@ -113,7 +117,7 @@ declare -A build_opts big_jobs=0 build_jobs["ufs"]=8 big_jobs=$((big_jobs+1)) -build_opts["ufs"]="${_verbose_opt} ${_build_ufs_opt}" +build_opts["ufs"]="${_wave_unst} ${_verbose_opt} ${_build_ufs_opt}" build_jobs["upp"]=6 # The UPP is hardcoded to use 6 cores build_opts["upp"]="" @@ -125,11 +129,11 @@ build_jobs["gfs_utils"]=1 build_opts["gfs_utils"]="${_verbose_opt}" build_jobs["ww3prepost"]=3 -build_opts["ww3prepost"]="${_verbose_opt} ${_build_ufs_opt}" +build_opts["ww3prepost"]="${_wave_unst} ${_verbose_opt} ${_build_ufs_opt}" # Optional DA builds if [[ "${_build_ufsda}" == "YES" ]]; then - if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera.intel" && "${MACHINE_ID}" != "hercules" ]]; then + if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" ]]; then echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build." else build_jobs["gdas"]=8 diff --git a/sorc/build_ufs.sh b/sorc/build_ufs.sh index 3e3f879f1a..24ee8c5f13 100755 --- a/sorc/build_ufs.sh +++ b/sorc/build_ufs.sh @@ -7,12 +7,13 @@ cwd=$(pwd) APP="S2SWA" CCPP_SUITES="FV3_GFS_v17_p8_ugwpv1,FV3_GFS_v17_coupled_p8_ugwpv1" # TODO: does the g-w need to build with all these CCPP_SUITES? 
-while getopts ":da:j:v" option; do +while getopts ":da:j:vw" option; do case "${option}" in d) BUILD_TYPE="DEBUG";; a) APP="${OPTARG}";; j) BUILD_JOBS="${OPTARG}";; v) export BUILD_VERBOSE="YES";; + w) PDLIB="ON";; :) echo "[${BASH_SOURCE[0]}]: ${option} requires an argument" ;; @@ -28,6 +29,7 @@ source "./tests/detect_machine.sh" source "./tests/module-setup.sh" MAKE_OPT="-DAPP=${APP} -D32BIT=ON -DCCPP_SUITES=${CCPP_SUITES}" +[[ ${PDLIB:-"OFF"} = "ON" ]] && MAKE_OPT+=" -DPDLIB=ON" [[ ${BUILD_TYPE:-"Release"} = "DEBUG" ]] && MAKE_OPT+=" -DDEBUG=ON" COMPILE_NR=0 CLEAN_BEFORE=YES diff --git a/sorc/build_ww3prepost.sh b/sorc/build_ww3prepost.sh index 919afaacb3..19cdba98da 100755 --- a/sorc/build_ww3prepost.sh +++ b/sorc/build_ww3prepost.sh @@ -6,12 +6,15 @@ cd "${script_dir}" || exit 1 # Default settings APP="S2SWA" +PDLIB="OFF" -while getopts ":j:a:v" option; do +while getopts ":j:a:dvw" option; do case "${option}" in a) APP="${OPTARG}";; + d) BUILD_TYPE="DEBUG";; j) BUILD_JOBS="${OPTARG}";; v) export BUILD_VERBOSE="YES";; + w) PDLIB="ON";; :) echo "[${BASH_SOURCE[0]}]: ${option} requires an argument" usage @@ -23,14 +26,16 @@ while getopts ":j:a:v" option; do esac done - # Determine which switch to use -if [[ "${APP}" == "ATMW" ]]; then +if [[ "${APP}" == "ATMW" ]]; then ww3switch="model/esmf/switch" -else - ww3switch="model/bin/switch_meshcap" -fi - +else + if [[ "${PDLIB}" == "ON" ]]; then + ww3switch="model/bin/switch_meshcap_pdlib" + else + ww3switch="model/bin/switch_meshcap" + fi +fi # Check final exec folder exists if [[ ! -d "../exec" ]]; then @@ -64,6 +69,8 @@ mkdir -p "${path_build}" || exit 1 cd "${path_build}" || exit 1 echo "Forcing a SHRD build" +buildswitch="${path_build}/switch" + cat "${SWITCHFILE}" > "${path_build}/tempswitch" sed -e "s/DIST/SHRD/g"\ @@ -73,15 +80,21 @@ sed -e "s/DIST/SHRD/g"\ -e "s/MPI / /g"\ -e "s/B4B / /g"\ -e "s/PDLIB / /g"\ + -e "s/SCOTCH / /g"\ + -e "s/METIS / /g"\ -e "s/NOGRB/NCEP2/g"\ "${path_build}/tempswitch" > "${path_build}/switch" rm "${path_build}/tempswitch" -echo "Switch file is ${path_build}/switch with switches:" -cat "${path_build}/switch" +echo "Switch file is ${buildswitch} with switches:" +cat "${buildswitch}" + +#define cmake build options +MAKE_OPT="-DCMAKE_INSTALL_PREFIX=install" +[[ ${BUILD_TYPE:-"Release"} = "DEBUG" ]] && MAKE_OPT+=" -DDEBUG=ON" #Build executables: -cmake "${WW3_DIR}" -DSWITCH="${path_build}/switch" -DCMAKE_INSTALL_PREFIX=install +cmake "${WW3_DIR}" -DSWITCH="${buildswitch}" "${MAKE_OPT}" rc=$? if (( rc != 0 )); then echo "Fatal error in cmake." diff --git a/ush/wave_grid_moddef.sh b/ush/wave_grid_moddef.sh index 5b1b212a16..f704c74af4 100755 --- a/ush/wave_grid_moddef.sh +++ b/ush/wave_grid_moddef.sh @@ -83,8 +83,16 @@ source "$HOMEgfs/ush/preamble.sh" rm -f ww3_grid.inp ln -sf ../ww3_grid.inp.$grdID ww3_grid.inp + + if [ -f ../${grdID}.msh ] + then + rm -f ${grdID}.msh + ln -sf ../${grdID}.msh ${grdID}.msh + fi + + - $EXECwave/ww3_grid 1> grid_${grdID}.out 2>&1 + "${EXECwave}/ww3_grid" 1> "grid_${grdID}.out" 2>&1 err=$? if [ "$err" != '0' ] @@ -99,10 +107,10 @@ source "$HOMEgfs/ush/preamble.sh" exit 3 fi - if [ -f mod_def.ww3 ] + if [[ -f mod_def.ww3 ]] then cp mod_def.ww3 "${COM_WAVE_PREP}/${RUN}wave.mod_def.${grdID}" - mv mod_def.ww3 ../mod_def.$grdID + mv mod_def.ww3 "../mod_def.${grdID}" else set +x echo ' ' @@ -118,6 +126,6 @@ source "$HOMEgfs/ush/preamble.sh" # 3. Clean up cd .. 
-rm -rf moddef_$grdID +rm -rf "moddef_${grdID}" # End of ww3_mod_def.sh ------------------------------------------------- # diff --git a/versions/fix.ver b/versions/fix.ver index f230188dbb..a2a9caf8e3 100644 --- a/versions/fix.ver +++ b/versions/fix.ver @@ -18,6 +18,6 @@ export mom6_ver=20231219 export orog_ver=20231027 export reg2grb2_ver=20220805 export sfc_climo_ver=20220805 -export ugwd_ver=20220805 +export ugwd_ver=20231027 export verif_ver=20220805 export wave_ver=20240105 From a23b7f2fdca5be700d257e28052a0104f2173a0f Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Fri, 16 Feb 2024 09:37:58 -0500 Subject: [PATCH 16/16] Add JEDI 3DEnVar atmosphere only CI test stub (#2309) --- ci/cases/pr/C48C48_ufs_hybatmDA.yaml | 22 ++++++++++++++++++++ ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml | 14 +++++++++++++ parm/config/gfs/config.base.emc.dyn | 7 +++++++ parm/config/gfs/config.esfc | 5 +++++ parm/config/gfs/config.nsst | 5 +++++ parm/config/gfs/config.resources | 20 ++++++++++-------- parm/config/gfs/config.sfcanl | 5 +++++ ush/python/pygfs/task/atm_analysis.py | 2 +- 8 files changed, 70 insertions(+), 10 deletions(-) create mode 100644 ci/cases/pr/C48C48_ufs_hybatmDA.yaml create mode 100644 ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml diff --git a/ci/cases/pr/C48C48_ufs_hybatmDA.yaml b/ci/cases/pr/C48C48_ufs_hybatmDA.yaml new file mode 100644 index 0000000000..7d3644b1af --- /dev/null +++ b/ci/cases/pr/C48C48_ufs_hybatmDA.yaml @@ -0,0 +1,22 @@ +experiment: + system: gfs + mode: cycled + +arguments: + pslot: {{ 'pslot' | getenv }} + app: ATM + resdetatmos: 48 + resensatmos: 48 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT + expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR + idate: 2021032312 + edate: 2021032400 + nens: 2 + gfs_cyc: 1 + start: warm + yaml: {{ HOMEgfs }}/ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml + +skip_ci_on_hosts: + - hera + - orion + - hercules diff --git a/ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml b/ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml new file mode 100644 index 0000000000..c072c391b3 --- /dev/null +++ b/ci/cases/yamls/ufs_hybatmDA_defaults.ci.yaml @@ -0,0 +1,14 @@ +defaults: + !INC {{ HOMEgfs }}/parm/config/gfs/yaml/defaults.yaml +base: + DOIAU: "NO" + DO_JEDIATMVAR: "YES" + DO_JEDIATMENS: "YES" + ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }} +esfc: + DONST: "NO" +nsst: + NST_MODEL: "1" +sfcanl: + DONST: "NO" + \ No newline at end of file diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn index 4301679fbb..16aed843ba 100644 --- a/parm/config/gfs/config.base.emc.dyn +++ b/parm/config/gfs/config.base.emc.dyn @@ -397,4 +397,11 @@ export FITSARC="YES" export FHMAX_FITS=132 [[ "${FHMAX_FITS}" -gt "${FHMAX_GFS}" ]] && export FHMAX_FITS=${FHMAX_GFS} +# The monitor jobs are not yet supported for JEDIATMVAR +if [[ ${DO_JEDIATMVAR} = "YES" ]]; then + export DO_VERFOZN="NO" # Ozone data assimilation monitoring + export DO_VERFRAD="NO" # Radiance data assimilation monitoring + export DO_VMINMON="NO" # GSI minimization monitoring +fi + echo "END: config.base" diff --git a/parm/config/gfs/config.esfc b/parm/config/gfs/config.esfc index 2bb3d48bb4..7c32313758 100644 --- a/parm/config/gfs/config.esfc +++ b/parm/config/gfs/config.esfc @@ -16,4 +16,9 @@ if [ $DOIAU_ENKF = "YES" ]; then export DOSFCANL_ENKF="NO" fi +# Turn off NST in JEDIATMENS +if [[ "${DO_JEDIATMENS}" == "YES" ]]; then + export DONST="NO" +fi + echo "END: config.esfc" diff --git a/parm/config/gfs/config.nsst 
b/parm/config/gfs/config.nsst index db4367b2c0..7bda81f058 100644 --- a/parm/config/gfs/config.nsst +++ b/parm/config/gfs/config.nsst @@ -10,6 +10,11 @@ echo "BEGIN: config.nsst" # nstf_name(1) : NST_MODEL (NSST Model) : 0 = OFF, 1 = ON but uncoupled, 2 = ON and coupled export NST_MODEL=2 +# Set NST_MODEL for JEDIATMVAR or JEDIATMENS +if [[ "${DO_JEDIATMVAR}" == "YES" || "${DO_JEDIATMENS}" == "YES" ]]; then + export NST_MODEL=1 +fi + # nstf_name(2) : NST_SPINUP : 0 = OFF, 1 = ON, export NST_SPINUP=0 cdate="${PDY}${cyc}" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index afebc70ae2..b06e634993 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -91,7 +91,7 @@ case ${step} in ;; "prepatmiodaobs") - export wtime_prepatmiodaobs="00:10:00" + export wtime_prepatmiodaobs="00:30:00" export npe_prepatmiodaobs=1 export nth_prepatmiodaobs=1 export npe_node_prepatmiodaobs=$(( npe_node_max / nth_prepatmiodaobs )) @@ -196,8 +196,8 @@ case ${step} in "atmanlinit") # make below case dependent later - export layout_x=1 - export layout_y=1 + export layout_x=8 + export layout_y=8 export layout_gsib_x=$(( layout_x * 3 )) export layout_gsib_y=$(( layout_y * 2 )) @@ -212,8 +212,8 @@ case ${step} in "atmanlrun") # make below case dependent later - export layout_x=1 - export layout_y=1 + export layout_x=8 + export layout_y=8 export wtime_atmanlrun="00:30:00" export npe_atmanlrun=$(( layout_x * layout_y * 6 )) @@ -221,6 +221,7 @@ case ${step} in export nth_atmanlrun=1 export nth_atmanlrun_gfs=${nth_atmanlrun} export npe_node_atmanlrun=$(( npe_node_max / nth_atmanlrun )) + export memory_atmanlrun="96GB" export is_exclusive=True ;; @@ -829,8 +830,8 @@ case ${step} in "atmensanlinit") # make below case dependent later - export layout_x=1 - export layout_y=1 + export layout_x=8 + export layout_y=8 export wtime_atmensanlinit="00:10:00" export npe_atmensanlinit=1 @@ -841,8 +842,8 @@ case ${step} in "atmensanlrun") # make below case dependent later - export layout_x=1 - export layout_y=1 + export layout_x=8 + export layout_y=8 export wtime_atmensanlrun="00:30:00" export npe_atmensanlrun=$(( layout_x * layout_y * 6 )) @@ -850,6 +851,7 @@ case ${step} in export nth_atmensanlrun=1 export nth_atmensanlrun_gfs=${nth_atmensanlrun} export npe_node_atmensanlrun=$(( npe_node_max / nth_atmensanlrun )) + export memory_atmensanlrun="96GB" export is_exclusive=True ;; diff --git a/parm/config/gfs/config.sfcanl b/parm/config/gfs/config.sfcanl index 9592fb77c9..e2fde8992a 100644 --- a/parm/config/gfs/config.sfcanl +++ b/parm/config/gfs/config.sfcanl @@ -8,4 +8,9 @@ echo "BEGIN: config.sfcanl" # Get task specific resources . $EXPDIR/config.resources sfcanl +# Turn off NST in JEDIATMVAR +if [[ "${DO_JEDIATMVAR}" == "YES" ]]; then + export DONST="NO" +fi + echo "END: config.sfcanl" diff --git a/ush/python/pygfs/task/atm_analysis.py b/ush/python/pygfs/task/atm_analysis.py index da41574fc9..6aed0533c6 100644 --- a/ush/python/pygfs/task/atm_analysis.py +++ b/ush/python/pygfs/task/atm_analysis.py @@ -94,7 +94,7 @@ def initialize(self: Analysis) -> None: 'NMEM_ENS', 'DATA', 'current_cycle', 'ntiles'] for key in keys: localconf[key] = self.task_config[key] - localconf.RUN = 'enkf' + self.task_config.RUN + localconf.RUN = 'enkfgdas' localconf.dirname = 'ens' FileHandler(self.get_fv3ens_dict(localconf)).sync()
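For reference, a hedged sketch of staging this new C48C48_ufs_hybatmDA case by hand on a supported host (automated CI currently skips it via skip_ci_on_hosts). The `workflow/create_experiment.py` entry point and the environment variables follow the conventions the case YAMLs read with the getenv filter above, but the paths, account, and pslot values below are placeholders, and the exact invocation may differ by setup:

    # Placeholders: adjust HOMEgfs, RUNTESTS, and SLURM_ACCOUNT for your site.
    export HOMEgfs=/path/to/global-workflow
    export RUNTESTS=/scratch/${USER}/RUNTESTS
    export SLURM_ACCOUNT=my_account
    export pslot=C48C48_ufs_hybatmDA_test
    # The case and defaults YAMLs read pslot, RUNTESTS, and SLURM_ACCOUNT
    # from the environment via the getenv filter shown above.
    "${HOMEgfs}/workflow/create_experiment.py" --yaml "${HOMEgfs}/ci/cases/pr/C48C48_ufs_hybatmDA.yaml"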