diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 20e4a97f9c..3113c31149 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -17,7 +17,7 @@ on: jobs: documentation: - + permissions: pull-requests: 'write' @@ -56,7 +56,8 @@ jobs: path: artifact/doc_warnings.log if-no-files-found: ignore - - name: Comment ReadDocs + - name: Comment ReadDocs Link in PR + if: github.event_name == 'pull_request' uses: actions/github-script@v6 with: script: | @@ -68,5 +69,5 @@ jobs: issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: message - }) + body: message + }) diff --git a/.gitmodules b/.gitmodules index 3eb26fb0fe..ed2f92d45c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "sorc/ufs_model.fd"] path = sorc/ufs_model.fd - url = https://github.com/ufs-community/ufs-weather-model + url = https://github.com/NOAA-GSL/ufs-weather-model ignore = dirty [submodule "sorc/wxflow"] path = sorc/wxflow @@ -10,7 +10,7 @@ url = https://github.com/NOAA-EMC/gfs-utils [submodule "sorc/ufs_utils.fd"] path = sorc/ufs_utils.fd - url = https://github.com/ufs-community/UFS_UTILS.git + url = https://github.com/NOAA-GSL/UFS_UTILS.git [submodule "sorc/verif-global.fd"] path = sorc/verif-global.fd url = https://github.com/NOAA-EMC/EMC_verif-global.git diff --git a/INFO b/INFO new file mode 100644 index 0000000000..86f53453bb --- /dev/null +++ b/INFO @@ -0,0 +1,26 @@ +03-15-24 +======== + 12Jan24 global-workflow + UFS: 29Jan24, 625ac02 + FV3: 29Jan24, bd38c56 + UPP: 07Nov23, 78f369b + UFS_UTILS: 22Dec23, ce385ce + + update pointers for ufs_model and ufs_utils + add GSL C3_MYNN xml suite + Change dt_inner=75 for any configuration running aerosol-aware Thompson (ltaerosol=.true.) 
+ +02-13-24 +======== + 12Jan24 global-workflow + UFS: 29Jan24, 625ac02 + UPP: 07Nov23, 78f369b + +01-08-24 +======== + 02Jan24 global-workflow + UFS: 21Dec23, 991d652 + UPP: 07Nov23, 78f369b + + use submodules instead of checkout.sh + gfsatmos_products replaces gfspost tasks diff --git a/ci/cases/pr/C48_ATM.yaml b/ci/cases/pr/C48_ATM.yaml index fc0b729af6..39412e8aeb 100644 --- a/ci/cases/pr/C48_ATM.yaml +++ b/ci/cases/pr/C48_ATM.yaml @@ -5,8 +5,8 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: ATM - resdet: 48 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 48 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR idate: 2021032312 edate: 2021032312 diff --git a/ci/cases/pr/C48_S2SW.yaml b/ci/cases/pr/C48_S2SW.yaml index f4b50ead22..2aba42f562 100644 --- a/ci/cases/pr/C48_S2SW.yaml +++ b/ci/cases/pr/C48_S2SW.yaml @@ -5,8 +5,9 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: S2SW - resdet: 48 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 48 + resdetocean: 5.0 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR idate: 2021032312 edate: 2021032312 diff --git a/ci/cases/pr/C48_S2SWA_gefs.yaml b/ci/cases/pr/C48_S2SWA_gefs.yaml index 5eb99d9c1e..d68360bf44 100644 --- a/ci/cases/pr/C48_S2SWA_gefs.yaml +++ b/ci/cases/pr/C48_S2SWA_gefs.yaml @@ -5,12 +5,13 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: S2SWA - resdet: 48 - resens: 48 + resdetatmos: 48 + resdetocean: 5.0 + resensatmos: 48 nens: 2 gfs_cyc: 1 start: cold - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR idate: 2021032312 edate: 2021032312 diff --git a/ci/cases/pr/C96C48_hybatmDA.yaml b/ci/cases/pr/C96C48_hybatmDA.yaml index 1f3e973ae7..be35283cff 100644 --- a/ci/cases/pr/C96C48_hybatmDA.yaml +++ b/ci/cases/pr/C96C48_hybatmDA.yaml @@ -5,9 +5,10 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: ATM 
- resdet: 96 - resens: 48 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 96 + resdetocean: 5.0 + resensatmos: 48 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C96C48 idate: 2021122018 @@ -16,6 +17,3 @@ arguments: gfs_cyc: 1 start: cold yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml - -skip_ci_on_hosts: - - hercules diff --git a/ci/cases/pr/C96_atm3DVar.yaml b/ci/cases/pr/C96_atm3DVar.yaml index 360e81e9d7..dee1525d80 100644 --- a/ci/cases/pr/C96_atm3DVar.yaml +++ b/ci/cases/pr/C96_atm3DVar.yaml @@ -5,16 +5,13 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: ATM - resdet: 96 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 96 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR - icsdir: ${ICSDIR_ROOT}/C96C48 + icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C96C48 idate: 2021122018 edate: 2021122106 nens: 0 gfs_cyc: 1 start: cold yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml - -skip_ci_on_hosts: - - hercules diff --git a/ci/cases/weekly/C384C192_hybatmda.yaml b/ci/cases/weekly/C384C192_hybatmda.yaml index 4c14018e2d..a4eae7d9a1 100644 --- a/ci/cases/weekly/C384C192_hybatmda.yaml +++ b/ci/cases/weekly/C384C192_hybatmda.yaml @@ -5,9 +5,10 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: ATM - resdet: 384 - resens: 192 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 384 + resdetocean: 0.25 + resensatmos: 192 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C384C192 idate: 2023040118 diff --git a/ci/cases/weekly/C384_S2SWA.yaml b/ci/cases/weekly/C384_S2SWA.yaml index 6c624f5698..8e2c043a4c 100644 --- a/ci/cases/weekly/C384_S2SWA.yaml +++ b/ci/cases/weekly/C384_S2SWA.yaml @@ -5,8 +5,9 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: S2SWA - resdet: 384 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 384 + 
resdetocean: 0.5 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR idate: 2016070100 edate: 2016070100 diff --git a/ci/cases/weekly/C384_atm3DVar.yaml b/ci/cases/weekly/C384_atm3DVar.yaml index e7986ef097..479d731b25 100644 --- a/ci/cases/weekly/C384_atm3DVar.yaml +++ b/ci/cases/weekly/C384_atm3DVar.yaml @@ -5,9 +5,10 @@ experiment: arguments: pslot: {{ 'pslot' | getenv }} app: ATM - resdet: 384 - resens: 192 - comrot: {{ 'RUNTESTS' | getenv }}/COMROT + resdetatmos: 384 + resdetocean: 0.25 + resensatmos: 192 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C384C192 idate: 2023040118 diff --git a/ci/platforms/gefs_ci_defaults.yaml b/ci/platforms/gefs_ci_defaults.yaml index 2aa30d6be4..dfb1626cdd 100644 --- a/ci/platforms/gefs_ci_defaults.yaml +++ b/ci/platforms/gefs_ci_defaults.yaml @@ -1,4 +1,4 @@ defaults: !INC {{ HOMEgfs }}/parm/config/gefs/yaml/defaults.yaml base: - ACCOUNT: ${SLURM_ACCOUNT} + ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }} diff --git a/ci/platforms/gfs_defaults_ci.yaml b/ci/platforms/gfs_defaults_ci.yaml index 5e57e617ec..b66be2a366 100644 --- a/ci/platforms/gfs_defaults_ci.yaml +++ b/ci/platforms/gfs_defaults_ci.yaml @@ -1,4 +1,4 @@ defaults: !INC {{ HOMEgfs }}/parm/config/gfs/yaml/defaults.yaml base: - ACCOUNT: ${SLURM_ACCOUNT} + ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }} diff --git a/ci/scripts/check_ci.sh b/ci/scripts/check_ci.sh index 164d423c67..cda2d4e9f2 100755 --- a/ci/scripts/check_ci.sh +++ b/ci/scripts/check_ci.sh @@ -22,7 +22,7 @@ REPO_URL="https://github.com/NOAA-EMC/global-workflow.git" source "${ROOT_DIR}/ush/detect_machine.sh" case ${MACHINE_ID} in - hera | orion) + hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" ;; @@ -149,7 +149,7 @@ for pr in ${pr_list}; do if [[ "${num_done}" -eq "${num_cycles}" ]]; then #Remove Experment cases that completed 
successfully rm -Rf "${pslot_dir}" - rm -Rf "${pr_dir}/RUNTESTS/COMROT/${pslot}" + rm -Rf "${pr_dir}/RUNTESTS/COMROOT/${pslot}" rm -f "${output_ci_single}" # echo "\`\`\`" > "${output_ci_single}" DATE=$(date +'%D %r') diff --git a/ci/scripts/driver.sh b/ci/scripts/driver.sh index a0edb4b4c3..5fc13ea524 100755 --- a/ci/scripts/driver.sh +++ b/ci/scripts/driver.sh @@ -34,7 +34,7 @@ export PS4='+ $(basename ${BASH_SOURCE})[${LINENO}]' source "${ROOT_DIR}/ush/detect_machine.sh" case ${MACHINE_ID} in - hera | orion) + hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" ;; @@ -173,7 +173,7 @@ for pr in ${pr_list}; do # we need to exit this instance of the driver script ################################################################# if [[ ${ci_status} -ne 0 ]]; then - build_PID_check=$("${ROOT_DIR}/ci/scripts/pr_list_database.py" --display "{pr}" --dbfile "${pr_list_dbfile}" | awk '{print $4}' | cut -d":" -f1) || true + build_PID_check=$("${ROOT_DIR}/ci/scripts/pr_list_database.py" --display "${pr}" --dbfile "${pr_list_dbfile}" | awk '{print $4}' | cut -d":" -f1) || true if [[ "${build_PID_check}" -ne "$$" ]]; then echo "Driver build PID: ${build_PID_check} no longer running this build ... 
exiting" exit 0 diff --git a/ci/scripts/driver_weekly.sh b/ci/scripts/driver_weekly.sh index 88b027d100..9460e0b0a4 100755 --- a/ci/scripts/driver_weekly.sh +++ b/ci/scripts/driver_weekly.sh @@ -38,7 +38,7 @@ export PS4='+ $(basename ${BASH_SOURCE[0]})[${LINENO}]' source "${ROOT_DIR}/ush/detect_machine.sh" case ${MACHINE_ID} in - hera | orion) + hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" ;; diff --git a/ci/scripts/run-check_ci.sh b/ci/scripts/run-check_ci.sh index a5c5369ef7..5a909c1c64 100755 --- a/ci/scripts/run-check_ci.sh +++ b/ci/scripts/run-check_ci.sh @@ -12,12 +12,12 @@ pslot=${2:-${pslot:-?}} # Name of the experiment being tested by this scr # TEST_DIR contains 2 directories; # 1. HOMEgfs: clone of the global-workflow -# 2. RUNTESTS: A directory containing EXPDIR and COMROT for experiments +# 2. RUNTESTS: A directory containing EXPDIR and COMROOT for experiments # # e.g. $> tree ./TEST_DIR # ./TEST_DIR # ├── HOMEgfs # └── RUNTESTS -# ├── COMROT +# ├── COMROOT # │   └── ${pslot} # └── EXPDIR # └── ${pslot} diff --git a/ci/scripts/run_ci.sh b/ci/scripts/run_ci.sh index cdaafb337f..4a390a23f2 100755 --- a/ci/scripts/run_ci.sh +++ b/ci/scripts/run_ci.sh @@ -20,7 +20,7 @@ export PS4='+ $(basename ${BASH_SOURCE})[${LINENO}]' source "${ROOT_DIR}/ush/detect_machine.sh" case ${MACHINE_ID} in - hera | orion) + hera | orion | hercules) echo "Running Automated Testing on ${MACHINE_ID}" source "${ROOT_DIR}/ci/platforms/config.${MACHINE_ID}" ;; diff --git a/docs/doxygen/mainpage.h b/docs/doxygen/mainpage.h index 40e8e6f946..19a51be272 100644 --- a/docs/doxygen/mainpage.h +++ b/docs/doxygen/mainpage.h @@ -12,7 +12,7 @@ With the FV3GFS system, a Rocoto driven workflow is being used. This workflow builds on the experiences and work of Kate.Howard and Terry.McGuinness for the GSM, but strips down a lot of complications arising from the use of a cumbersome \c para_config. 
The \c para_config no longer exists in the workflow and instead a number of \c config files are added, one each for an individual task e.g. \p config.anal contains the analysis specific information. A base config called as \c config.base contains information related to the machine, super-structure, etc. The idea behind splitting the \p para_config into individual, smaller and managable configs is to provide a means to run any chosen task without the overhead of the full cycling framework. All the configs are located under \c fv3gfs/config -Additionally, the structure of the \c COMROT directory is now modified to look like operations. This enables the use of the workflow much closer to the operational environment, with the exception of the workflow manager. +Additionally, the structure of the \c ROTDIR directory is now modified to look like operations. This enables the use of the workflow much closer to the operational environment, with the exception of the workflow manager. This is a very much a work in progress and any issues should be reported back and are greatly appreciated. @@ -23,12 +23,12 @@ To setup an experiment, a python script \c setup_expt.py (located in \ $> setup_expt.py -h usage: setup_expt.py [-h] --pslot PSLOT [--configdir CONFIGDIR] [--idate IDATE] [--icsdir ICSDIR] - [--resdet RESDET] [--resens RESENS] [--comrot COMROT] + [--resdetatmos RESDET] [--resensatmos RESENS] [--comroot COMROOT] [--expdir EXPDIR] [--nens NENS] [--cdump CDUMP] Setup files and directories to start a GFS parallel. 
Create EXPDIR, copy - config files Create COMROT experiment directory structure, link initial - condition files from $ICSDIR to $COMROT + config files Create ROTDIR experiment directory structure, link initial + condition files from $ICSDIR to $ROTDIR optional arguments: -h, --help show this help message and exit @@ -40,11 +40,13 @@ To setup an experiment, a python script \c setup_expt.py (located in \ (default: 2016100100) --icsdir full path to initial condition directory (default: /scratch4/NCEPDEV/da/noscrub/Rahul.Mahajan/ICS) - --resdet resolution of the deterministic model forecast + --resdetatmos atmosphere resolution of the deterministic model forecast (default: 384) - --resens resolution of the ensemble model forecast + --resdetocean ocean resolution of the deterministic model forecast + (default: 0. [determined automatically based on atmosphere resolution]) + --resensatmos resolution of the ensemble model forecast (default: 192) - --comrot full path to COMROT + --comroot full path to COMROOT, where ROTDIR (COMROOT+PSLOT) will be created (default: None) --expdir full path to EXPDIR (default: None) @@ -53,7 +55,7 @@ To setup an experiment, a python script \c setup_expt.py (located in \ --cdump CDUMP to start the experiment (default: gdas) -The above script creates directories \c EXPDIR and \c COMROT. It will make links for initial conditions from a location provided via the \c --icsdir argument for a chosen resolution for the control \c --resdet and the ensemble \c --resens. Experiment name is controlled by the input argument \c --pslot. The script will ask user input in case any of the directories already exist. It will copy experiment configuration files into the \c EXPDIR from \c CONFIGDIR. +The above script creates directories \c EXPDIR and \c ROTDIR. It will make links for initial conditions from a location provided via the \c --icsdir argument for a chosen resolution for the control \c --resdetatmos and the ensemble \c --resensatmos. 
Experiment name is controlled by the input argument \c --pslot. The script will ask user input in case any of the directories already exist. It will copy experiment configuration files into the \c EXPDIR from \c CONFIGDIR. Sample initial conditions for a few resolutions are available at:
Hera: TODO: /path/here/for/initial/conditions
diff --git a/docs/source/errors_faq.rst b/docs/source/errors_faq.rst index 2660a01e60..519e29bace 100644 --- a/docs/source/errors_faq.rst +++ b/docs/source/errors_faq.rst @@ -36,10 +36,10 @@ Example:: **Solution:** set TERM to "xterm" (bash: export TERM=xterm ; csh/tcsh: setenv TERM xterm) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Issue: Directory name change for EnKF folder in COMROT +Issue: Directory name change for EnKF folder in ROTDIR ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -**Issue:** The EnKF COMROT folders were renamed during the GFS v15 development process to remove the period between "enkf" and "gdas": enkf.gdas.$PDY → enkfgdas.$PDY +**Issue:** The EnKF ROTDIR folders were renamed during the GFS v15 development process to remove the period between "enkf" and "gdas": enkf.gdas.$PDY → enkfgdas.$PDY **Fix:** Older tarballs on HPSS will have the older directory name with the period between 'enkf' and 'gdas'. Make sure to rename folder to 'enkfgdas.$PDY' after obtaining. Only an issue for the initial cycle. diff --git a/docs/source/init.rst b/docs/source/init.rst index 65e400c68e..14a0ea0d56 100644 --- a/docs/source/init.rst +++ b/docs/source/init.rst @@ -391,7 +391,7 @@ The chgres jobs will have a dependency on the data-pull jobs and will wait to ru 5. Check output: -In the config you will have defined an output folder called ``$OUTDIR``. The converted output will be found there, including the needed abias and radstat initial condition files (if CDUMP=gdas). The files will be in the needed directory structure for the global-workflow system, therefore a user can move the contents of their ``$OUTDIR`` directly into their ``$ROTDIR/$COMROT``. +In the config you will have defined an output folder called ``$OUTDIR``. The converted output will be found there, including the needed abias and radstat initial condition files (if CDUMP=gdas). 
The files will be in the needed directory structure for the global-workflow system, therefore a user can move the contents of their ``$OUTDIR`` directly into their ``$ROTDIR``. Please report bugs to George Gayno (george.gayno@noaa.gov) and Kate Friedman (kate.friedman@noaa.gov). @@ -449,7 +449,7 @@ The warm starts and other output from production are at C768 deterministic and C What files should you pull for starting a new experiment with warm starts from production? ------------------------------------------------------------------------------------------ -That depends on what mode you want to run -- forecast-only or cycled. Whichever mode, navigate to the top of your ``COMROT`` and pull the entirety of the tarball(s) listed below for your mode. The files within the tarball are already in the ``$CDUMP.$PDY/$CYC/$ATMOS`` folder format expected by the system. +That depends on what mode you want to run -- forecast-only or cycled. Whichever mode, navigate to the top of your ``ROTDIR`` and pull the entirety of the tarball(s) listed below for your mode. The files within the tarball are already in the ``$CDUMP.$PDY/$CYC/$ATMOS`` folder format expected by the system. For forecast-only there are two tarballs to pull @@ -489,7 +489,7 @@ Tarballs per cycle: com_gfs_vGFSVER_enkfgdas.YYYYMMDD_CC.enkfgdas_restart_grp7.tar com_gfs_vGFSVER_enkfgdas.YYYYMMDD_CC.enkfgdas_restart_grp8.tar -Go to the top of your ``COMROT/ROTDIR`` and pull the contents of all tarballs there. The tarballs already contain the needed directory structure. +Go to the top of your ``ROTDIR`` and pull the contents of all tarballs there. The tarballs already contain the needed directory structure. .. 
_warmstarts-preprod-parallels: @@ -514,7 +514,7 @@ Recent pre-implementation parallel series was for GFS v16 (implemented March 202 + ../$GDATE/gdas_restartb.tar + ../$GDATE/enkfgdas_restartb_grp##.tar (where ## is 01 through 08) (note, older tarballs may include a period between enkf and gdas: "enkf.gdas") -* **Where do I put the warm-start initial conditions?** Extraction should occur right inside your COMROT. You may need to rename the enkf folder (enkf.gdas.$PDY -> enkfgdas.$PDY). +* **Where do I put the warm-start initial conditions?** Extraction should occur right inside your ROTDIR. You may need to rename the enkf folder (enkf.gdas.$PDY -> enkfgdas.$PDY). Due to a recent change in the dycore, you may also need an additional offline step to fix the checksum of the NetCDF files for warm start. See the :ref:`Fix netcdf checksum section `. @@ -602,5 +602,5 @@ And then on all platforms: :: - cd $COMROT + cd $ROTDIR for f in $(find ./ -name *tile*.nc); do echo $f; ncatted -a checksum,,d,, $f; done diff --git a/docs/source/setup.rst b/docs/source/setup.rst index be04aa5d96..0e87ade9a5 100644 --- a/docs/source/setup.rst +++ b/docs/source/setup.rst @@ -28,8 +28,8 @@ The following command examples include variables for reference but users should :: cd workflow - ./setup_expt.py gfs forecast-only --idate $IDATE --edate $EDATE [--app $APP] [--start $START] [--gfs_cyc $GFS_CYC] [--resdet $RESDET] - [--pslot $PSLOT] [--configdir $CONFIGDIR] [--comrot $COMROT] [--expdir $EXPDIR] + ./setup_expt.py gfs forecast-only --idate $IDATE --edate $EDATE [--app $APP] [--start $START] [--gfs_cyc $GFS_CYC] [--resdetatmos $RESDETATMOS] [--resdetocean $RESDETOCEAN] + [--pslot $PSLOT] [--configdir $CONFIGDIR] [--comroot $COMROOT] [--expdir $EXPDIR] where: @@ -50,9 +50,10 @@ where: * ``$EDATE`` is the ending date of your run (YYYYMMDDCC) and is the last cycle that will complete * ``$PSLOT`` is the name of your experiment [default: test] * ``$CONFIGDIR`` is the path to the ``/config`` 
folder under the copy of the system you're using [default: $TOP_OF_CLONE/parm/config/] - * ``$RESDET`` is the FV3 resolution (i.e. 768 for C768) [default: 384] + * ``$RESDETATMOS`` is the resolution of the atmosphere component of the system (i.e. 768 for C768) [default: 384] + * ``$RESDETOCEAN`` is the resolution of the ocean component of the system (i.e. 0.25 for 1/4 degree) [default: 0.; determined based on atmosphere resolution] * ``$GFS_CYC`` is the forecast frequency (0 = none, 1 = 00z only [default], 2 = 00z & 12z, 4 = all cycles) - * ``$COMROT`` is the path to your experiment output directory. DO NOT include PSLOT folder at end of path, it’ll be built for you. [default: $HOME (but do not use default due to limited space in home directories normally, provide a path to a larger scratch space)] + * ``$COMROOT`` is the path to your experiment output directory. Your ``ROTDIR`` (rotating com directory) will be created using ``COMROOT`` and ``PSLOT``. [default: $HOME (but do not use default due to limited space in home directories normally, provide a path to a larger scratch space)] * ``$EXPDIR`` is the path to your experiment directory where your configs will be placed and where you will find your workflow monitoring files (i.e. rocoto database and xml file). DO NOT include PSLOT folder at end of path, it will be built for you. 
[default: $HOME] Examples: @@ -62,21 +63,21 @@ Atm-only: :: cd workflow - ./setup_expt.py gfs forecast-only --pslot test --idate 2020010100 --edate 2020010118 --resdet 384 --gfs_cyc 4 --comrot /some_large_disk_area/Joe.Schmo/comrot --expdir /some_safe_disk_area/Joe.Schmo/expdir + ./setup_expt.py gfs forecast-only --pslot test --idate 2020010100 --edate 2020010118 --resdetatmos 384 --gfs_cyc 4 --comroot /some_large_disk_area/Joe.Schmo/comroot --expdir /some_safe_disk_area/Joe.Schmo/expdir Coupled: :: cd workflow - ./setup_expt.py gfs forecast-only --app S2SW --pslot coupled_test --idate 2013040100 --edate 2013040100 --resdet 384 --comrot /some_large_disk_area/Joe.Schmo/comrot --expdir /some_safe_disk_area/Joe.Schmo/expdir + ./setup_expt.py gfs forecast-only --app S2SW --pslot coupled_test --idate 2013040100 --edate 2013040100 --resdetatmos 384 --comroot /some_large_disk_area/Joe.Schmo/comroot --expdir /some_safe_disk_area/Joe.Schmo/expdir Coupled with aerosols: :: cd workflow - ./setup_expt.py gfs forecast-only --app S2SWA --pslot coupled_test --idate 2013040100 --edate 2013040100 --resdet 384 --comrot /some_large_disk_area/Joe.Schmo/comrot --expdir /some_safe_disk_area/Joe.Schmo/expdir + ./setup_expt.py gfs forecast-only --app S2SWA --pslot coupled_test --idate 2013040100 --edate 2013040100 --resdetatmos 384 --comroot /some_large_disk_area/Joe.Schmo/comroot --expdir /some_safe_disk_area/Joe.Schmo/expdir **************************************** Step 2: Set user and experiment settings @@ -134,14 +135,14 @@ Scripts that will be used: Step 1) Run experiment generator script *************************************** -The following command examples include variables for reference but users should not use environmental variables but explicit values to submit the commands. Exporting variables like EXPDIR to your environment causes an error when the python scripts run. 
Please explicitly include the argument inputs when running both setup scripts: +The following command examples include variables for reference but users should not use environment variables but explicit values to submit the commands. Exporting variables like EXPDIR to your environment causes an error when the python scripts run. Please explicitly include the argument inputs when running both setup scripts: :: cd workflow ./setup_expt.py gfs cycled --idate $IDATE --edate $EDATE [--app $APP] [--start $START] [--gfs_cyc $GFS_CYC] - [--resdet $RESDET] [--resens $RESENS] [--nens $NENS] [--cdump $CDUMP] - [--pslot $PSLOT] [--configdir $CONFIGDIR] [--comrot $COMROT] [--expdir $EXPDIR] [--icsdir $ICSDIR] + [--resdetatmos $RESDETATMOS] [--resdetocean $RESDETOCEAN] [--resensatmos $RESENSATMOS] [--nens $NENS] [--cdump $CDUMP] + [--pslot $PSLOT] [--configdir $CONFIGDIR] [--comroot $COMROOT] [--expdir $EXPDIR] [--icsdir $ICSDIR] where: @@ -161,13 +162,14 @@ where: * ``$EDATE`` is the ending date of your run (YYYYMMDDCC) and is the last cycle that will complete * ``$START`` is the start type (warm or cold [default]) * ``$GFS_CYC`` is the forecast frequency (0 = none, 1 = 00z only [default], 2 = 00z & 12z, 4 = all cycles) - * ``$RESDET`` is the FV3 resolution of the deterministic forecast [default: 384] - * ``$RESENS`` is the FV3 resolution of the ensemble (EnKF) forecast [default: 192] + * ``$RESDETATMOS`` is the resolution of the atmosphere component of the deterministic forecast [default: 384] + * ``$RESDETOCEAN`` is the resolution of the ocean component of the deterministic forecast [default: 0.; determined based on atmosphere resolution] + * ``$RESENSATMOS`` is the resolution of the atmosphere component of the ensemble forecast [default: 192] * ``$NENS`` is the number of ensemble members [default: 20] * ``$CDUMP`` is the starting phase [default: gdas] * ``$PSLOT`` is the name of your experiment [default: test] * ``$CONFIGDIR`` is the path to the config folder under the copy 
of the system you're using [default: $TOP_OF_CLONE/parm/config/] - * ``$COMROT`` is the path to your experiment output directory. DO NOT include PSLOT folder at end of path, it’ll be built for you. [default: $HOME] + * ``$COMROOT`` is the path to your experiment output directory. Your ``ROTDIR`` (rotating com directory) will be created using ``COMROOT`` and ``PSLOT``. [default: $HOME] * ``$EXPDIR`` is the path to your experiment directory where your configs will be placed and where you will find your workflow monitoring files (i.e. rocoto database and xml file). DO NOT include PSLOT folder at end of path, it will be built for you. [default: $HOME] * ``$ICSDIR`` is the path to the ICs for your run if generated separately. [default: None] @@ -178,13 +180,13 @@ Example: :: cd workflow - ./setup_expt.py gfs cycled --pslot test --configdir /home/Joe.Schmo/git/global-workflow/parm/config --idate 2020010100 --edate 2020010118 --comrot /some_large_disk_area/Joe.Schmo/comrot --expdir /some_safe_disk_area/Joe.Schmo/expdir --resdet 384 --resens 192 --nens 80 --gfs_cyc 4 + ./setup_expt.py gfs cycled --pslot test --configdir /home/Joe.Schmo/git/global-workflow/parm/config --idate 2020010100 --edate 2020010118 --comroot /some_large_disk_area/Joe.Schmo/comroot --expdir /some_safe_disk_area/Joe.Schmo/expdir --resdetatmos 384 --resensatmos 192 --nens 80 --gfs_cyc 4 Example ``setup_expt.py`` on Orion: :: - Orion-login-3$ ./setup_expt.py gfs cycled --pslot test --idate 2022010118 --edate 2022010200 --resdet 192 --resens 96 --nens 80 --comrot /work/noaa/stmp/jschmo/comrot --expdir /work/noaa/global/jschmo/expdir + Orion-login-3$ ./setup_expt.py gfs cycled --pslot test --idate 2022010118 --edate 2022010200 --resdetatmos 192 --resensatmos 96 --nens 80 --comroot /work/noaa/stmp/jschmo/comroot --expdir /work/noaa/global/jschmo/expdir EDITED: /work/noaa/global/jschmo/expdir/test/config.base as per user input. EDITED: /work/noaa/global/jschmo/expdir/test/config.aeroanl as per user input. 
EDITED: /work/noaa/global/jschmo/expdir/test/config.ocnanal as per user input. @@ -195,9 +197,9 @@ What happens if I run ``setup_expt.py`` again for an experiment that already exi :: - Orion-login-3$ ./setup_expt.py gfs cycled --pslot test --idate 2022010118 --edate 2022010200 --resdet 192 --resens 96 --nens 80 --comrot /work/noaa/stmp/jschmo/comrot --expdir /work/noaa/global/jschmo/expdir + Orion-login-3$ ./setup_expt.py gfs cycled --pslot test --idate 2022010118 --edate 2022010200 --resdetatmos 192 --resensatmos 96 --nens 80 --comroot /work/noaa/stmp/jschmo/comroot --expdir /work/noaa/global/jschmo/expdir - directory already exists in /work/noaa/stmp/jschmo/comrot/test + directory already exists in /work/noaa/stmp/jschmo/comroot/test Do you wish to over-write [y/N]: y @@ -208,7 +210,7 @@ What happens if I run ``setup_expt.py`` again for an experiment that already exi EDITED: /work/noaa/global/jschmo/expdir/test/config.aeroanl as per user input. EDITED: /work/noaa/global/jschmo/expdir/test/config.ocnanal as per user input. -Your ``COMROT`` and ``EXPDIR`` will be deleted and remade. Be careful with this! +Your ``ROTDIR`` and ``EXPDIR`` will be deleted and remade. Be careful with this! **************************************** Step 2: Set user and experiment settings diff --git a/docs/source/start.rst b/docs/source/start.rst index 7dc093e8a4..fb3c3ef316 100644 --- a/docs/source/start.rst +++ b/docs/source/start.rst @@ -16,7 +16,7 @@ Start your run from within your EXPDIR The first jobs of your run should now be queued or already running (depending on machine traffic). How exciting! -You'll now have a "logs" folder in both your COMROT and EXPDIR. The EXPDIR log folder contains workflow log files (e.g. rocoto command results) and the COMROT log folder will contain logs for each job (previously known as dayfiles). +You'll now have a "logs" folder in both your ``ROTDIR`` and ``EXPDIR``. The EXPDIR log folder contains workflow log files (e.g. 
rocoto command results) and the ``ROTDIR`` log folder will contain logs for each job (previously known as dayfiles). ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Set up your experiment cron diff --git a/docs/source/view.rst b/docs/source/view.rst index 3093755e9a..e321936274 100644 --- a/docs/source/view.rst +++ b/docs/source/view.rst @@ -2,7 +2,7 @@ View Experiment output ====================== -The output from your run will be found in the ``COMROT/ROTDIR`` you established. This is also where you placed your initial conditions. Within your ``COMROT`` you will have the following directory structure (based on the type of experiment you run): +The output from your run will be found in the ``ROTDIR`` you established. This is also where you placed your initial conditions. Within your ``ROTDIR`` you will have the following directory structure (based on the type of experiment you run): ^^^^^^^^^^^^^ Forecast-only @@ -29,11 +29,11 @@ Cycled logs/ <- logs for each cycle in the run vrfyarch/ <- contains files related to verification and archival -Here is an example ``COMROT`` for a cycled run as it may look several cycles in (note the archival steps remove older cycle folders as the run progresses): +Here is an example ``ROTDIR`` for a cycled run as it may look several cycles in (note the archival steps remove older cycle folders as the run progresses): :: - -bash-4.2$ ll /scratch1/NCEPDEV/stmp4/Joe.Schmo/comrot/testcyc192 + -bash-4.2$ ll /scratch1/NCEPDEV/stmp4/Joe.Schmo/comroot/testcyc192 total 88 drwxr-sr-x 4 Joe.Schmo stmp 4096 Oct 22 04:50 enkfgdas.20190529 drwxr-sr-x 4 Joe.Schmo stmp 4096 Oct 22 07:20 enkfgdas.20190530 diff --git a/env/HERCULES.env b/env/HERCULES.env index 3721be2b66..6a4aad7a7d 100755 --- a/env/HERCULES.env +++ b/env/HERCULES.env @@ -12,7 +12,7 @@ fi step=$1 -export npe_node_max=40 +export npe_node_max=80 export launcher="srun -l --export=ALL" export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" @@ -26,19 +26,164 @@ export KMP_AFFINITY=scatter export 
OMP_STACKSIZE=2048000 export NTHSTACK=1024000000 #export LD_BIND_NOW=1 +export I_MPI_EXTRA_FILESYSTEM=1 +export I_MPI_EXTRA_FILESYSTEM_LIST=lustre ulimit -s unlimited ulimit -a -if [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || \ - [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostpnt" ]] || [[ "${step}" == "wavepostbndpntbll" ]]; then +case ${step} in + "prep" | "prepbufr") + + nth_max=$((npe_node_max / npe_node_prep)) + + export POE="NO" + export BACK=${BACK:-"YES"} + export sys_tp="HERCULES" + export launcher_PREP="srun" + ;; + "preplandobs") + + export APRUN_CALCFIMS="${launcher} -n 1" + ;; + "waveinit" | "waveprep" | "wavepostsbs" | "wavepostbndpnt" | "wavepostpnt" | "wavepostbndpntbll") export CFP_MP="YES" - if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi + [[ "${step}" = "waveprep" ]] && export MP_PULSE=0 export wavempexec=${launcher} export wave_mpmd=${mpmd_opt} -elif [[ "${step}" = "fcst" ]]; then + ;; + "atmanlrun") + + nth_max=$((npe_node_max / npe_node_atmanlrun)) + + export NTHREADS_ATMANL=${nth_atmanlrun:-${nth_max}} + [[ ${NTHREADS_ATMANL} -gt ${nth_max} ]] && export NTHREADS_ATMANL=${nth_max} + export APRUN_ATMANL="${launcher} -n ${npe_atmanlrun} --cpus-per-task=${NTHREADS_ATMANL}" + ;; + "atmensanlrun") + + nth_max=$((npe_node_max / npe_node_atmensanlrun)) + + export NTHREADS_ATMENSANL=${nth_atmensanlrun:-${nth_max}} + [[ ${NTHREADS_ATMENSANL} -gt ${nth_max} ]] && export NTHREADS_ATMENSANL=${nth_max} + export APRUN_ATMENSANL="${launcher} -n ${npe_atmensanlrun} --cpus-per-task=${NTHREADS_ATMENSANL}" + ;; + "aeroanlrun") + + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_aeroanlrun)) + + export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}} + [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max} + export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}" + ;; + "landanl") + 
+ nth_max=$((npe_node_max / npe_node_landanl)) + + export NTHREADS_LANDANL=${nth_landanl:-${nth_max}} + [[ ${NTHREADS_LANDANL} -gt ${nth_max} ]] && export NTHREADS_LANDANL=${nth_max} + export APRUN_LANDANL="${launcher} -n ${npe_landanl} --cpus-per-task=${NTHREADS_LANDANL}" + + export APRUN_APPLY_INCR="${launcher} -n 6" + ;; + "ocnanalbmat") + + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_ocnanalbmat)) + + export NTHREADS_OCNANAL=${nth_ocnanalbmat:-${nth_max}} + [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat} --cpus-per-task=${NTHREADS_OCNANAL}" + ;; + "ocnanalrun") + + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_ocnanalrun)) + + export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}} + [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun} --cpus-per-task=${NTHREADS_OCNANAL}" + ;; + "ocnanalchkpt") + + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_ocnanalchkpt)) + + export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}} + [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max} + export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}" + ;; + "anal" | "analcalc") + + export MKL_NUM_THREADS=4 + export MKL_CBWR=AUTO + + export CFP_MP=${CFP_MP:-"YES"} + export USE_CFP=${USE_CFP:-"YES"} + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_anal)) + + export NTHREADS_GSI=${nth_anal:-${nth_max}} + [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max} + export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}" + + export NTHREADS_CALCINC=${nth_calcinc:-1} + [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export 
NTHREADS_CALCINC=${nth_max} + export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}" + + export NTHREADS_CYCLE=${nth_cycle:-12} + [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max} + npe_cycle=${ntiles:-6} + export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}" + + export NTHREADS_GAUSFCANL=1 + npe_gausfcanl=${npe_gausfcanl:-1} + export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}" + ;; + "sfcanl") + nth_max=$((npe_node_max / npe_node_sfcanl)) + + export NTHREADS_CYCLE=${nth_sfcanl:-14} + [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max} + npe_sfcanl=${ntiles:-6} + export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}" + ;; + "eobs") + + export MKL_NUM_THREADS=4 + export MKL_CBWR=AUTO + + export CFP_MP=${CFP_MP:-"YES"} + export USE_CFP=${USE_CFP:-"YES"} + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_eobs)) + + export NTHREADS_GSI=${nth_eobs:-${nth_max}} + [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max} + export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}" + ;; + "eupd") + + export CFP_MP=${CFP_MP:-"YES"} + export USE_CFP=${USE_CFP:-"YES"} + export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}" + + nth_max=$((npe_node_max / npe_node_eupd)) + + export NTHREADS_ENKF=${nth_eupd:-${nth_max}} + [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max} + export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}" + ;; + "fcst" | "efcs") export OMP_STACKSIZE=512M if [[ "${CDUMP}" =~ "gfs" ]]; then @@ -53,17 +198,110 @@ elif [[ "${step}" = "fcst" ]]; then # With ESMF threading, the model wants to use the full node export APRUN_UFS="${launcher} -n ${ntasks}" unset nprocs ppn nnodes ntasks + ;; -elif [[ "${step}" = "upp" ]]; then + 
"upp") nth_max=$((npe_node_max / npe_node_upp)) export NTHREADS_UPP=${nth_upp:-1} [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max} export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}" - -elif [[ "${step}" = "atmos_products" ]]; then + ;; + "atmos_products") export USE_CFP="YES" # Use MPMD for downstream product generation + ;; + "ecen") -fi + nth_max=$((npe_node_max / npe_node_ecen)) + + export NTHREADS_ECEN=${nth_ecen:-${nth_max}} + [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max} + export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}" + + export NTHREADS_CHGRES=${nth_chgres:-12} + [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max} + export APRUN_CHGRES="time" + + export NTHREADS_CALCINC=${nth_calcinc:-1} + [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max} + export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}" + + ;; + "esfc") + + nth_max=$((npe_node_max / npe_node_esfc)) + + export NTHREADS_ESFC=${nth_esfc:-${nth_max}} + [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max} + export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}" + + export NTHREADS_CYCLE=${nth_cycle:-14} + [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max} + export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}" + + ;; + "epos") + + nth_max=$((npe_node_max / npe_node_epos)) + + export NTHREADS_EPOS=${nth_epos:-${nth_max}} + [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max} + export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}" + + ;; + "postsnd") + + export CFP_MP="YES" + + nth_max=$((npe_node_max / npe_node_postsnd)) + + export NTHREADS_POSTSND=${nth_postsnd:-1} + [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max} + export 
APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}" + + export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1} + [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max} + export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}" + + ;; + "awips") + + nth_max=$((npe_node_max / npe_node_awips)) + + export NTHREADS_AWIPS=${nth_awips:-2} + [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max} + export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}" + + ;; + "gempak") + + export CFP_MP="YES" + + if [[ ${CDUMP} == "gfs" ]]; then + npe_gempak=${npe_gempak_gfs} + npe_node_gempak=${npe_node_gempak_gfs} + fi + + nth_max=$((npe_node_max / npe_node_gempak)) + + export NTHREADS_GEMPAK=${nth_gempak:-1} + [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max} + export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}" + + ;; + "fit2obs") + + nth_max=$((npe_node_max / npe_node_fit2obs)) + + export NTHREADS_FIT2OBS=${nth_fit2obs:-1} + [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max} + export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}" + + ;; + *) + # Some other job not yet defined here + echo "WARNING: The job step ${step} does not specify Hercules-specific resources" + ;; +esac diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT index a1ecc116ea..4a8242abfb 100755 --- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT +++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT @@ -15,7 +15,11 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalrun" -c "base ocnanal ocnanalr # Begin JOB SPECIFIC work ############################################## -export COMOUT=${COMOUT:-${ROTDIR}/${CDUMP}.${PDY}/${cyc}/ocean} +YMD=${PDY} HH=${cyc} generate_com -rx COM_OCEAN_ANALYSIS + +mkdir -p "${COM_OCEAN_ANALYSIS}" + +export COMOUT=${COM_OCEAN_ANALYSIS} 
############################################################### # Run relevant script diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY index 08e7da60c0..4f4251b34f 100755 --- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY +++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY @@ -14,7 +14,11 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalrun" -c "base ocnanal ocnanalr # Begin JOB SPECIFIC work ############################################## -export COMOUT=${COMOUT:-${ROTDIR}/${CDUMP}.${PDY}/${cyc}/ocean} +YMD=${PDY} HH=${cyc} generate_com -rx COM_OCEAN_ANALYSIS + +mkdir -p "${COM_OCEAN_ANALYSIS}" + +export COMOUT=${COM_OCEAN_ANALYSIS} ############################################################### # Run relevant script diff --git a/jobs/JGFS_ATMOS_FSU_GENESIS b/jobs/JGFS_ATMOS_FSU_GENESIS index cc730e21bb..8b1600de88 100755 --- a/jobs/JGFS_ATMOS_FSU_GENESIS +++ b/jobs/JGFS_ATMOS_FSU_GENESIS @@ -33,25 +33,18 @@ export PYTHONPATH=${USHens_tracker}/FSUgenesisPY:${PYTHONPATH} ############################################## # Define COM and Data directories ############################################## -export COMIN=${ROTDIR}/${RUN}.${PDY}/${cyc}/${COMPONENT} -export COMINgfs=${COMIN} -#export gfsdir=${COMINgfs} -export gfsdir=${ROTDIR} -export COMINgdas=${COMIN} -export gdasdir=${COMINgdas} -export COMOUT=${ROTDIR}/${RUN}.${PDY}/${cyc}/${COMPONENT} -export COMINsyn=${COMINsyn:-$(compath.py ${envir}/com/gfs/${gfs_ver})/syndat} - -if [ ${RUN_ENVIR} = "nco" ]; then - export COMOUThur=${COMROOTp1}/hur/${envir}/global - export COMOUTatcf=${COMROOTp1}/nhc/${envir}/atcf - mkdir -m 775 -p ${COMOUThur} ${COMOUTatcf} -else -# export COMOUThur=$COMOUT -# export COMOUTatcf=$COMOUT - export COMOUThur=${DATA} - export COMOUTatcf=${DATA} -fi +YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_GENESIS +YMD=${PDY} HH=${cyc} GRID="0p25" generate_com -rx COM_ATMOS_GRIB_0p25:COM_ATMOS_GRIB_GRID_TMPL + +# The following variables are used by the 
tracker scripts which are outside +# of global-workflow and therefore can't be standardized at this time +export COMIN=${COM_ATMOS_GRIB_0p25} +export gfsdir=${COM_ATMOS_GRIB_0p25} +export COMINgfs=${COM_ATMOS_GRIB_0p25} + +export COMOUT=${COM_ATMOS_GENESIS} + +export COMINsyn=${COMINsyn:-$(compath.py "${envir}/com/gfs/${gfs_ver}")/syndat} ############################################## # Run relevant script diff --git a/jobs/JGFS_ATMOS_GEMPAK_META b/jobs/JGFS_ATMOS_GEMPAK_META index b7786b1f49..8e1c05763f 100755 --- a/jobs/JGFS_ATMOS_GEMPAK_META +++ b/jobs/JGFS_ATMOS_GEMPAK_META @@ -6,7 +6,7 @@ # GFS GEMPAK META PRODUCT GENERATION ############################################ source "${HOMEgfs}/ush/preamble.sh" -source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_meta" -e "base" +source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_meta" -c "base" ############################################### diff --git a/jobs/JGLOBAL_ATMOS_POST_MANAGER b/jobs/JGLOBAL_ATMOS_POST_MANAGER index 1d82537dca..7c726bc2ad 100755 --- a/jobs/JGLOBAL_ATMOS_POST_MANAGER +++ b/jobs/JGLOBAL_ATMOS_POST_MANAGER @@ -12,12 +12,6 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "post" -c "base post" export NET=${NET:-gfs} export RUN=${RUN:-gfs} - -#################################### -# Specify version numbers -#################################### -export gfs_ver=${gfs_ver:-v15.0.0} - #################################### # Specify Execution Areas #################################### @@ -32,10 +26,7 @@ export USHgfs=${HOMEgfs:-${HOMEgfs}/ush} ########################### export EXT_FCST=NO -export ROTDIR=${ROTDIR:-${COMROOT:?}/${NET}/${envir}} -export COMIN=${COMIN:-${ROTDIR}/${RUN}.${PDY}/${cyc}/atmos} -export COMOUT=${COMOUT:-${ROTDIR}/${RUN}.${PDY}/${cyc}/atmos} - +YMD=${PDY} HH=${cyc} generate_com -rx COM_ATMOS_HISTORY ######################################################## # Execute the script. 
diff --git a/modulefiles/module_base.hera.lua b/modulefiles/module_base.hera.lua index cf77df520a..311fb0a1cf 100644 --- a/modulefiles/module_base.hera.lua +++ b/modulefiles/module_base.hera.lua @@ -6,45 +6,46 @@ spack_stack_ver=(os.getenv("spack_stack_ver") or "None") spack_env=(os.getenv("spack_env") or "None") prepend_path("MODULEPATH", "/scratch1/NCEPDEV/nems/role.epic/spack-stack/spack-stack-" .. spack_stack_ver .. "/envs/" .. spack_env .. "/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("stack_intel_ver"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver"))) -load(pathJoin("python", os.getenv("python_ver"))) - -load(pathJoin("hpss", os.getenv("hpss_ver"))) -load(pathJoin("gempak", os.getenv("gempak_ver"))) -load(pathJoin("ncl", os.getenv("ncl_ver"))) -load(pathJoin("jasper", os.getenv("jasper_ver"))) -load(pathJoin("libpng", os.getenv("libpng_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) -load(pathJoin("R", os.getenv("R_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf-c", os.getenv("netcdf_c_ver"))) -load(pathJoin("netcdf-fortran", os.getenv("netcdf_fortran_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib-util", os.getenv("grib_util_ver"))) -load(pathJoin("g2tmpl", os.getenv("g2tmpl_ver"))) -load(pathJoin("gsi-ncdiag", os.getenv("gsi_ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("bufr", os.getenv("bufr_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) -load(pathJoin("py-netcdf4", os.getenv("py_netcdf4_ver"))) -load(pathJoin("py-pyyaml", os.getenv("py_pyyaml_ver"))) -load(pathJoin("py-jinja2", os.getenv("py_jinja2_ver"))) - -load(pathJoin("met", os.getenv("met_ver"))) -load(pathJoin("metplus", os.getenv("metplus_ver"))) +load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) +load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or 
"None"))) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) + +load(pathJoin("hpss", (os.getenv("hpss_ver") or "None"))) +load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) +load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) +load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) +load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) +load(pathJoin("R", (os.getenv("R_ver") or "None"))) + +load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None"))) +load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib-util", (os.getenv("grib_util_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("bufr", (os.getenv("bufr_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) +load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) +load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) +load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) + +-- MET/METplus are not available for use with spack-stack, yet +--load(pathJoin("met", (os.getenv("met_ver") or "None"))) +--load(pathJoin("metplus", (os.getenv("metplus_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) --prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/v" .. 
(os.getenv("prepobs_run_ver") or "None"), "modulefiles")) prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.hercules.lua b/modulefiles/module_base.hercules.lua index d587b90c4f..d9c8f5ed0b 100644 --- a/modulefiles/module_base.hercules.lua +++ b/modulefiles/module_base.hercules.lua @@ -6,41 +6,43 @@ spack_stack_ver=(os.getenv("spack_stack_ver") or "None") spack_env=(os.getenv("spack_env") or "None") prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/hercules/spack-stack-" .. spack_stack_ver .. "/envs/" .. spack_env .. 
"/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("stack_intel_ver"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver"))) -load(pathJoin("python", os.getenv("python_ver"))) +load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) +load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) +load(pathJoin("intel-oneapi-mkl", (os.getenv("intel_mkl_ver") or "None"))) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) +load(pathJoin("perl", (os.getenv("perl_ver") or "None"))) -- TODO load NCL once the SAs remove the 'depends_on' statements within it -- NCL is a static installation and does not depend on any libraries -- but as is will load, among others, the system netcdf-c/4.9.0 module ---load(pathJoin("ncl", os.getenv("ncl_ver"))) -load(pathJoin("jasper", os.getenv("jasper_ver"))) -load(pathJoin("libpng", os.getenv("libpng_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf-c", os.getenv("netcdf_c_ver"))) -load(pathJoin("netcdf-fortran", os.getenv("netcdf_fortran_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib-util", os.getenv("grib_util_ver"))) -load(pathJoin("g2tmpl", os.getenv("g2tmpl_ver"))) -load(pathJoin("gsi-ncdiag", os.getenv("gsi_ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("bufr", os.getenv("bufr_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) -load(pathJoin("py-netcdf4", os.getenv("py_netcdf4_ver"))) -load(pathJoin("py-pyyaml", os.getenv("py_pyyaml_ver"))) -load(pathJoin("py-jinja2", os.getenv("py_jinja2_ver"))) +--load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) +load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) +load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) + 
+load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None"))) +load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib-util", (os.getenv("grib_util_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("bufr", (os.getenv("bufr_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) +load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) +load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) +load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.jet.lua b/modulefiles/module_base.jet.lua index 7a0faaaeb6..64d35da57a 100644 --- a/modulefiles/module_base.jet.lua +++ b/modulefiles/module_base.jet.lua @@ -6,42 +6,42 @@ spack_stack_ver=(os.getenv("spack_stack_ver") or "None") spack_env=(os.getenv("spack_env") or "None") prepend_path("MODULEPATH", "/mnt/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-" .. spack_stack_ver .. "/envs/" .. 
spack_env .. "/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("stack_intel_ver"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver"))) -load(pathJoin("python", os.getenv("python_ver"))) - -load(pathJoin("hpss", os.getenv("hpss_ver"))) -load(pathJoin("gempak", os.getenv("gempak_ver"))) -load(pathJoin("ncl", os.getenv("ncl_ver"))) -load(pathJoin("jasper", os.getenv("jasper_ver"))) -load(pathJoin("libpng", os.getenv("libpng_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) -load(pathJoin("R", os.getenv("R_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf-c", os.getenv("netcdf_c_ver"))) -load(pathJoin("netcdf-fortran", os.getenv("netcdf_fortran_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib-util", os.getenv("grib_util_ver"))) -load(pathJoin("g2tmpl", os.getenv("g2tmpl_ver"))) -load(pathJoin("gsi-ncdiag", os.getenv("gsi_ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("bufr", os.getenv("bufr_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) -load(pathJoin("py-netcdf4", os.getenv("py_netcdf4_ver"))) -load(pathJoin("py-pyyaml", os.getenv("py_pyyaml_ver"))) -load(pathJoin("py-jinja2", os.getenv("py_jinja2_ver"))) +load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) +load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) + +load(pathJoin("hpss", (os.getenv("hpss_ver") or "None"))) +load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) +load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) +load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) +load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) +load(pathJoin("R", (os.getenv("R_ver") or "None"))) + +load(pathJoin("hdf5", 
(os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None"))) +load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib-util", (os.getenv("grib_util_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("bufr", (os.getenv("bufr_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) +load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) +load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) +load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) --prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/Fit2Obs/v" .. 
(os.getenv("fit2obs_ver") or "None"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.orion.lua b/modulefiles/module_base.orion.lua index 3464accc89..65486855d0 100644 --- a/modulefiles/module_base.orion.lua +++ b/modulefiles/module_base.orion.lua @@ -6,43 +6,44 @@ spack_stack_ver=(os.getenv("spack_stack_ver") or "None") spack_env=(os.getenv("spack_env") or "None") prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-" .. spack_stack_ver .. "/envs/" .. spack_env .. "/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("stack_intel_ver"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver"))) -load(pathJoin("python", os.getenv("python_ver"))) - -load(pathJoin("gempak", os.getenv("gempak_ver"))) -load(pathJoin("ncl", os.getenv("ncl_ver"))) -load(pathJoin("jasper", os.getenv("jasper_ver"))) -load(pathJoin("libpng", os.getenv("libpng_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf-c", os.getenv("netcdf_c_ver"))) -load(pathJoin("netcdf-fortran", os.getenv("netcdf_fortran_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib-util", os.getenv("grib_util_ver"))) -load(pathJoin("g2tmpl", os.getenv("g2tmpl_ver"))) -load(pathJoin("gsi-ncdiag", os.getenv("gsi_ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("bufr", os.getenv("bufr_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) -load(pathJoin("py-netcdf4", os.getenv("py_netcdf4_ver"))) -load(pathJoin("py-pyyaml", os.getenv("py_pyyaml_ver"))) -load(pathJoin("py-jinja2", os.getenv("py_jinja2_ver"))) - -load(pathJoin("met", os.getenv("met_ver"))) -load(pathJoin("metplus", os.getenv("metplus_ver"))) 
+load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) +load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) + +load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) +load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) +load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) +load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) + +load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None"))) +load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib-util", (os.getenv("grib_util_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("bufr", (os.getenv("bufr_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) +load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) +load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) +load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) + +-- MET/METplus are not yet supported with spack-stack +--load(pathJoin("met", (os.getenv("met_ver") or "None"))) +--load(pathJoin("metplus", (os.getenv("metplus_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) --prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/v" .. 
(os.getenv("prepobs_run_ver") or "None"), "modulefiles")) prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.s4.lua b/modulefiles/module_base.s4.lua index 51eeee0f10..d99a93c3f4 100644 --- a/modulefiles/module_base.s4.lua +++ b/modulefiles/module_base.s4.lua @@ -6,39 +6,39 @@ spack_stack_ver=(os.getenv("spack_stack_ver") or "None") spack_env=(os.getenv("spack_env") or "None") prepend_path("MODULEPATH", "/data/prod/jedi/spack-stack/spack-stack-" .. spack_stack_ver .. "/envs/" .. spack_env .. 
"/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("stack_intel_ver"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver"))) -load(pathJoin("python", os.getenv("python_ver"))) - -load(pathJoin("ncl", os.getenv("ncl_ver"))) -load(pathJoin("jasper", os.getenv("jasper_ver"))) -load(pathJoin("libpng", os.getenv("libpng_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf-c", os.getenv("netcdf_c_ver"))) -load(pathJoin("netcdf-fortran", os.getenv("netcdf_fortran_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib-util", os.getenv("grib_util_ver"))) -load(pathJoin("g2tmpl", os.getenv("g2tmpl_ver"))) -load(pathJoin("gsi-ncdiag", os.getenv("gsi_ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("bufr", os.getenv("bufr_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) -load(pathJoin("py-netcdf4", os.getenv("py_netcdf4_ver"))) -load(pathJoin("py-pyyaml", os.getenv("py_pyyaml_ver"))) -load(pathJoin("py-jinja2", os.getenv("py_jinja2_ver"))) +load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) +load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) + +load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) +load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) +load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) + +load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None"))) +load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib-util", 
(os.getenv("grib_util_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("bufr", (os.getenv("bufr_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) +load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None"))) +load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None"))) +load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None"))) setenv("WGRIB2","wgrib2") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) --prepend_path("MODULEPATH", pathJoin("/data/prod/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) prepend_path("MODULEPATH", pathJoin("/data/prod/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/data/prod/glopara/git/Fit2Obs/v" .. 
(os.getenv("fit2obs_ver") or "None"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.wcoss2.lua b/modulefiles/module_base.wcoss2.lua index 57d3bf51dd..ee4ee6a5fb 100644 --- a/modulefiles/module_base.wcoss2.lua +++ b/modulefiles/module_base.wcoss2.lua @@ -2,43 +2,43 @@ help([[ Load environment to run GFS on WCOSS2 ]]) -load(pathJoin("PrgEnv-intel", os.getenv("PrgEnv_intel_ver"))) -load(pathJoin("craype", os.getenv("craype_ver"))) -load(pathJoin("intel", os.getenv("intel_ver"))) -load(pathJoin("cray-mpich", os.getenv("cray_mpich_ver"))) -load(pathJoin("cray-pals", os.getenv("cray_pals_ver"))) -load(pathJoin("cfp", os.getenv("cfp_ver"))) +load(pathJoin("PrgEnv-intel", (os.getenv("PrgEnv_intel_ver") or "None"))) +load(pathJoin("craype", (os.getenv("craype_ver") or "None"))) +load(pathJoin("intel", (os.getenv("intel_ver") or "None"))) +load(pathJoin("cray-mpich", (os.getenv("cray_mpich_ver") or "None"))) +load(pathJoin("cray-pals", (os.getenv("cray_pals_ver") or "None"))) +load(pathJoin("cfp", (os.getenv("cfp_ver") or "None"))) setenv("USE_CFP","YES") -load(pathJoin("python", os.getenv("python_ver"))) -load(pathJoin("gempak", os.getenv("gempak_ver"))) -load(pathJoin("perl", os.getenv("perl_ver"))) -load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) -load(pathJoin("udunits", os.getenv("udunits_ver"))) -load(pathJoin("gsl", os.getenv("gsl_ver"))) -load(pathJoin("cdo", os.getenv("cdo_ver"))) - -load(pathJoin("hdf5", os.getenv("hdf5_ver"))) -load(pathJoin("netcdf", os.getenv("netcdf_ver"))) - -load(pathJoin("nco", os.getenv("nco_ver"))) -load(pathJoin("prod_util", os.getenv("prod_util_ver"))) -load(pathJoin("grib_util", os.getenv("grib_util_ver"))) -load(pathJoin("bufr_dump", os.getenv("bufr_dump_ver"))) -load(pathJoin("util_shared", os.getenv("util_shared_ver"))) -load(pathJoin("g2tmpl", 
os.getenv("g2tmpl_ver"))) -load(pathJoin("ncdiag", os.getenv("ncdiag_ver"))) -load(pathJoin("crtm", os.getenv("crtm_ver"))) -load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) - ---prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/v" .. os.getenv("prepobs_run_ver"), "modulefiles")) +load(pathJoin("python", (os.getenv("python_ver") or "None"))) +load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) +load(pathJoin("perl", (os.getenv("perl_ver") or "None"))) +load(pathJoin("libjpeg", (os.getenv("libjpeg_ver") or "None"))) +load(pathJoin("udunits", (os.getenv("udunits_ver") or "None"))) +load(pathJoin("gsl", (os.getenv("gsl_ver") or "None"))) +load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) + +load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) +load(pathJoin("netcdf", (os.getenv("netcdf_ver") or "None"))) + +load(pathJoin("nco", (os.getenv("nco_ver") or "None"))) +load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None"))) +load(pathJoin("grib_util", (os.getenv("grib_util_ver") or "None"))) +load(pathJoin("bufr_dump", (os.getenv("bufr_dump_ver") or "None"))) +load(pathJoin("util_shared", (os.getenv("util_shared_ver") or "None"))) +load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None"))) +load(pathJoin("ncdiag", (os.getenv("ncdiag_ver") or "None"))) +load(pathJoin("crtm", (os.getenv("crtm_ver") or "None"))) +load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None"))) + +--prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) -load(pathJoin("prepobs", os.getenv("prepobs_run_ver"))) +load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) -prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/Fit2Obs/v" .. 
os.getenv("fit2obs_ver"), "modulefiles")) -load(pathJoin("fit2obs", os.getenv("fit2obs_ver"))) +prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) +load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) append_path("MODULEPATH", pathJoin("/apps/ops/prod/nco/models/modulefiles")) -load(pathJoin("mos_shared", os.getenv("mos_shared_ver"))) +load(pathJoin("mos_shared", (os.getenv("mos_shared_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/parm/config/gefs/config.base.emc.dyn b/parm/config/gefs/config.base.emc.dyn index 64a38f669f..5358a37768 100644 --- a/parm/config/gefs/config.base.emc.dyn +++ b/parm/config/gefs/config.base.emc.dyn @@ -102,7 +102,7 @@ export EXP_WARM_START="@EXP_WARM_START@" export assim_freq=6 export PSLOT="@PSLOT@" export EXPDIR="@EXPDIR@/${PSLOT}" -export ROTDIR="@ROTDIR@/${PSLOT}" +export ROTDIR="@COMROOT@/${PSLOT}" export DATAROOT="${STMP}/RUNDIRS/${PSLOT}" # TODO: set via prod_envir in Ops export RUNDIR="${DATAROOT}" # TODO: Should be removed; use DATAROOT instead @@ -154,35 +154,27 @@ export OPS_RES="C768" # Do not change export LEVS=128 export CASE="@CASECTL@" # CASE is required in GEFS to determine ocean/ice/wave resolutions export CASE_ENS="@CASEENS@" -# TODO: This should not depend on $CASE or $CASE_ENS -# These are the currently available grid-combinations +export OCNRES="@OCNRES@" +export ICERES="${OCNRES}" +# These are the currently recommended grid-combinations case "${CASE}" in "C48") - export OCNRES=500 export waveGRD='glo_500' ;; - "C96") - export OCNRES=100 - export waveGRD='glo_200' - ;; - "C192") - export OCNRES=050 + "C96" | "C192") export waveGRD='glo_200' ;; "C384") - export OCNRES=025 export waveGRD='glo_025' ;; - "C768") - export OCNRES=025 - export waveGRD='mx025' + "C768" | "C1152") + export waveGRD='mx025' ;; *) - export OCNRES=025 - export waveGRD='glo_025' + echo "FATAL ERROR: Unrecognized CASE 
${CASE}, ABORT!" + exit 1 ;; esac -export ICERES=${OCNRES} case "${APP}" in ATM) diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn deleted file mode 100644 index b726c1788a..0000000000 --- a/parm/config/gfs/config.base.emc.dyn +++ /dev/null @@ -1,406 +0,0 @@ -#! /usr/bin/env bash - -########## config.base ########## -# Common to all steps - -echo "BEGIN: config.base" - -# Machine environment -export machine="@MACHINE@" - -# EMC parallel or NCO production -export RUN_ENVIR="emc" - -# Account, queue, etc. -export ACCOUNT="@ACCOUNT@" -export QUEUE="@QUEUE@" -export QUEUE_SERVICE="@QUEUE_SERVICE@" -export PARTITION_BATCH="@PARTITION_BATCH@" -export PARTITION_SERVICE="@PARTITION_SERVICE@" - -# Project to use in mass store: -export HPSS_PROJECT="@HPSS_PROJECT@" - -# Directories relative to installation areas: -export HOMEgfs=@HOMEgfs@ -export PARMgfs="${HOMEgfs}/parm" -export FIXgfs="${HOMEgfs}/fix" -export USHgfs="${HOMEgfs}/ush" -export UTILgfs="${HOMEgfs}/util" -export EXECgfs="${HOMEgfs}/exec" -export SCRgfs="${HOMEgfs}/scripts" - -export FIXam="${FIXgfs}/am" -export FIXaer="${FIXgfs}/aer" -export FIXcpl="${FIXgfs}/cpl" -export FIXlut="${FIXgfs}/lut" -export FIXorog="${FIXgfs}/orog" -export FIXcice="${FIXgfs}/cice" -export FIXmom="${FIXgfs}/mom6" -export FIXreg2grb2="${FIXgfs}/reg2grb2" -export FIXugwd="${FIXgfs}/ugwd" - -######################################################################## - -# GLOBAL static environment parameters -export PACKAGEROOT="@PACKAGEROOT@" # TODO: set via prod_envir in Ops -export COMROOT="@COMROOT@" # TODO: set via prod_envir in Ops -export COMINsyn="@COMINsyn@" -export DMPDIR="@DMPDIR@" -export BASE_CPLIC="@BASE_CPLIC@" - -# USER specific paths -export HOMEDIR="@HOMEDIR@" -export STMP="@STMP@" -export PTMP="@PTMP@" -export NOSCRUB="@NOSCRUB@" - -# Base directories for various builds -export BASE_GIT="@BASE_GIT@" - -# Toggle to turn on/off GFS downstream processing. 
-export DO_GOES="NO" # GOES products -export DO_BUFRSND="NO" # BUFR sounding products -export DO_GEMPAK="NO" # GEMPAK products -export DO_AWIPS="NO" # AWIPS products -export DO_NPOESS="NO" # NPOESS products -export DO_TRACKER="YES" # Hurricane track verification -export DO_GENESIS="YES" # Cyclone genesis verification -export DO_GENESIS_FSU="NO" # Cyclone genesis verification (FSU) -export DO_VERFOZN="YES" # Ozone data assimilation monitoring -export DO_VERFRAD="YES" # Radiance data assimilation monitoring -export DO_VMINMON="YES" # GSI minimization monitoring -export DO_MOS="NO" # GFS Model Output Statistics - Only supported on WCOSS2 - -# NO for retrospective parallel; YES for real-time parallel -# arch.sh uses REALTIME for MOS. Need to set REALTIME=YES -# if want MOS written to HPSS. Should update arch.sh to -# use RUNMOS flag -export REALTIME="YES" - -# Experiment mode (cycled or forecast-only) -export MODE="@MODE@" # cycled/forecast-only - -#################################################### -# DO NOT ADD MACHINE DEPENDENT STUFF BELOW THIS LINE -# IF YOU HAVE TO MAKE MACHINE SPECIFIC CHANGES BELOW -# FEEL FREE TO MOVE THEM ABOVE THIS LINE TO KEEP IT -# CLEAR -#################################################### -# Build paths relative to $HOMEgfs -export FIXgsi="${HOMEgfs}/fix/gsi" -export HOMEpost="${HOMEgfs}" -export HOMEobsproc="${BASE_GIT:-}/obsproc/v${obsproc_run_ver:-1.1.2}" - -# CONVENIENT utility scripts and other environment parameters -export NCP="/bin/cp -p" -export NMV="/bin/mv" -export NLN="/bin/ln -sf" -export VERBOSE="YES" -export KEEPDATA="NO" -export CHGRP_RSTPROD="@CHGRP_RSTPROD@" -export CHGRP_CMD="@CHGRP_CMD@" -export NCDUMP="${NETCDF:-${netcdf_c_ROOT:-}}/bin/ncdump" -export NCLEN="${HOMEgfs}/ush/getncdimlen" - -# Machine environment, jobs, and other utility scripts -export BASE_ENV="${HOMEgfs}/env" -export BASE_JOB="${HOMEgfs}/jobs/rocoto" - -# EXPERIMENT specific environment parameters -export SDATE=@SDATE@ -export EDATE=@EDATE@ -export 
EXP_WARM_START="@EXP_WARM_START@" -export assim_freq=6 -export PSLOT="@PSLOT@" -export EXPDIR="@EXPDIR@/${PSLOT}" -export ROTDIR="@ROTDIR@/${PSLOT}" -export ROTDIR_DUMP="YES" #Note: A value of "NO" does not currently work -export DUMP_SUFFIX="" -if [[ "${PDY}${cyc}" -ge "2019092100" && "${PDY}${cyc}" -le "2019110700" ]]; then - export DUMP_SUFFIX="p" # Use dumps from NCO GFS v15.3 parallel -fi -export DATAROOT="${STMP}/RUNDIRS/${PSLOT}" # TODO: set via prod_envir in Ops -export RUNDIR="${DATAROOT}" # TODO: Should be removed; use DATAROOT instead -export ARCDIR="${NOSCRUB}/archive/${PSLOT}" -export ATARDIR="@ATARDIR@" - -# Commonly defined parameters in JJOBS -export envir=${envir:-"prod"} -export NET="gfs" # NET is defined in the job-card (ecf) -export RUN=${RUN:-${CDUMP:-"gfs"}} # RUN is defined in the job-card (ecf); CDUMP is used at EMC as a RUN proxy -# TODO: determine where is RUN actually used in the workflow other than here -# TODO: is it possible to replace all instances of ${CDUMP} to ${RUN} to be -# consistent w/ EE2? - -# Get all the COM path templates -source "${EXPDIR}/config.com" - -export ERRSCRIPT=${ERRSCRIPT:-'eval [[ $err = 0 ]]'} -export LOGSCRIPT=${LOGSCRIPT:-""} -#export ERRSCRIPT=${ERRSCRIPT:-"err_chk"} -#export LOGSCRIPT=${LOGSCRIPT:-"startmsg"} -export REDOUT="1>" -export REDERR="2>" - -export SENDECF=${SENDECF:-"NO"} -export SENDSDM=${SENDSDM:-"NO"} -export SENDDBN_NTC=${SENDDBN_NTC:-"NO"} -export SENDDBN=${SENDDBN:-"NO"} -export DBNROOT=${DBNROOT:-${UTILROOT:-}/fakedbn} - -# APP settings -export APP=@APP@ - -# Defaults: -export DO_ATM="YES" -export DO_COUPLED="NO" -export DO_WAVE="NO" -export DO_OCN="NO" -export DO_ICE="NO" -export DO_AERO="NO" -export WAVE_CDUMP="" # When to include wave suite: gdas, gfs, or both -export DOBNDPNT_WAVE="NO" -export FRAC_GRID=".true." - -# Set operational resolution -export OPS_RES="C768" # Do not change # TODO: Why is this needed and where is it used? 
- -# Resolution specific parameters -export LEVS=128 -export CASE="@CASECTL@" -export CASE_ENS="@CASEENS@" -# TODO: This should not depend on $CASE or $CASE_ENS -# These are the currently available grid-combinations -case "${CASE}" in - "C48") - export OCNRES=500 - export waveGRD='glo_500' - ;; - "C96") - export OCNRES=500 - export waveGRD='glo_200' - ;; - "C192") - export OCNRES=050 - export waveGRD='glo_200' - ;; - "C384") - export OCNRES=025 - export waveGRD='glo_025' - ;; - "C768" | "C1152") - export OCNRES=025 - export waveGRD='mx025' - ;; - *) - echo "FATAL ERROR: Unrecognized CASE ${CASE}, ABORT!" - exit 1 - ;; -esac -export ICERES=${OCNRES} - -case "${APP}" in - ATM) - ;; - ATMA) - export DO_AERO="YES" - ;; - ATMW) - export DO_COUPLED="YES" - export DO_WAVE="YES" - export WAVE_CDUMP="both" - ;; - NG-GODAS) - export DO_ATM="NO" - export DO_OCN="YES" - export DO_ICE="YES" - ;; - S2S*) - export DO_COUPLED="YES" - export DO_OCN="YES" - export DO_ICE="YES" - - if [[ "${APP}" =~ A$ ]]; then - export DO_AERO="YES" - fi - - if [[ "${APP}" =~ ^S2SW ]]; then - export DO_WAVE="YES" - export WAVE_CDUMP="both" - fi - ;; - *) - echo "Unrecognized APP: '${APP}'" - exit 1 - ;; -esac - -# Surface cycle update frequency -if [[ "${CDUMP}" =~ "gdas" ]] ; then - export FHCYC=1 - export FTSFS=10 -elif [[ "${CDUMP}" =~ "gfs" ]] ; then - export FHCYC=24 -fi - -# Output frequency of the forecast model (for cycling) -export FHMIN=0 -export FHMAX=9 -export FHOUT=3 # Will be changed to 1 in config.base if (DOHYBVAR set to NO and l4densvar set to false) - -# Cycle to run EnKF (set to BOTH for both gfs and gdas) -export EUPD_CYC="gdas" - -# GFS cycle info -export gfs_cyc=@gfs_cyc@ # 0: no GFS cycle, 1: 00Z only, 2: 00Z and 12Z only, 4: all 4 cycles. 
- -# GFS output and frequency -export FHMIN_GFS=0 - -export FHMAX_GFS_00=120 -export FHMAX_GFS_06=120 -export FHMAX_GFS_12=120 -export FHMAX_GFS_18=120 -current_fhmax_var=FHMAX_GFS_${cyc}; declare -x FHMAX_GFS=${!current_fhmax_var} - -export FHOUT_GFS=6 # Must be 6 for S2S until #1629 is addressed; 3 for ops -export FHMAX_HF_GFS=0 -export FHOUT_HF_GFS=1 -if (( gfs_cyc != 0 )); then - export STEP_GFS=$(( 24 / gfs_cyc )) -else - export STEP_GFS="0" -fi -export ILPOST=1 # gempak output frequency up to F120 - -# GFS restart interval in hours -#JKHexport restart_interval_gfs=12 -export restart_interval_gfs=-1 ## JKH -# NOTE: Do not set this to zero. Instead set it to $FHMAX_GFS -# TODO: Remove this variable from config.base and reference from config.fcst -# TODO: rework logic in config.wave and push it to parsing_nameslist_WW3.sh where it is actually used - -export QUILTING=".true." -export OUTPUT_GRID="gaussian_grid" -export WRITE_DOPOST=".true." # WRITE_DOPOST=true, use inline POST -export WRITE_NSFLIP=".true." - -# IAU related parameters -export DOIAU="@DOIAU@" # Enable 4DIAU for control with 3 increments -export IAUFHRS="3,6,9" -export IAU_FHROT=${IAUFHRS%%,*} -export IAU_DELTHRS=6 -export IAU_OFFSET=6 -export DOIAU_ENKF=${DOIAU:-"YES"} # Enable 4DIAU for EnKF ensemble -export IAUFHRS_ENKF="3,6,9" -export IAU_DELTHRS_ENKF=6 - -# Use Jacobians in eupd and thereby remove need to run eomg -export lobsdiag_forenkf=".true." 
- -# if [[ "$SDATE" -lt "2019020100" ]]; then # no rtofs in GDA -# export DO_WAVE="NO" -# echo "WARNING: Wave suite turned off due to lack of RTOFS in GDA for SDATE" -# fi - -# Microphysics Options: 99-ZhaoCarr, 8-Thompson; 6-WSM6, 10-MG, 11-GFDL -export imp_physics=8 - -# Shared parameters -# DA engine -export DO_JEDIATMVAR="@DO_JEDIATMVAR@" -export DO_JEDIATMENS="@DO_JEDIATMENS@" -export DO_JEDIOCNVAR="@DO_JEDIOCNVAR@" -export DO_JEDILANDDA="@DO_JEDILANDDA@" -export DO_MERGENSST="@DO_MERGENSST@" - -# Hybrid related -export DOHYBVAR="@DOHYBVAR@" -export NMEM_ENS=@NMEM_ENS@ -export NMEM_ENS_GFS=@NMEM_ENS@ -export SMOOTH_ENKF="NO" -export l4densvar=".true." -export lwrite4danl=".true." - -# EnKF output frequency -if [[ ${DOHYBVAR} = "YES" ]]; then - export FHMIN_ENKF=3 - export FHMAX_ENKF=9 - export FHMAX_ENKF_GFS=120 - export FHOUT_ENKF_GFS=3 - if [[ ${l4densvar} = ".true." ]]; then - export FHOUT=1 - export FHOUT_ENKF=1 - else - export FHOUT_ENKF=3 - fi -fi - -# if 3DVAR and IAU -if [[ ${DOHYBVAR} == "NO" && ${DOIAU} == "YES" ]]; then - export IAUFHRS="6" - export IAU_FHROT="3" - export IAU_FILTER_INCREMENTS=".true." - export IAUFHRS_ENKF="6" -fi - -# Check if cycle is cold starting, DOIAU off, or free-forecast mode -if [[ "${MODE}" = "cycled" && "${SDATE}" = "${PDY}${cyc}" && ${EXP_WARM_START} = ".false." ]] || [[ "${DOIAU}" = "NO" ]] || [[ "${MODE}" = "forecast-only" && ${EXP_WARM_START} = ".false." ]] ; then - export IAU_OFFSET=0 - export IAU_FHROT=0 - export IAUFHRS="6" -fi - -if [[ "${DOIAU_ENKF}" = "NO" ]]; then export IAUFHRS_ENKF="6"; fi - -# turned on nsst in anal and/or fcst steps, and turn off rtgsst -export DONST="YES" -if [[ ${DONST} = "YES" ]]; then export FNTSFA=" "; fi - -# The switch to apply SST elevation correction or not -export nst_anl=.true. 
- -# Make the nsstbufr file on the fly or use the GDA version -export MAKE_NSSTBUFR="@MAKE_NSSTBUFR@" - -# Make the aircraft prepbufr file on the fly or use the GDA version -export MAKE_ACFTBUFR="@MAKE_ACFTBUFR@" - -# Analysis increments to zero in CALCINCEXEC -export INCREMENTS_TO_ZERO="'liq_wat_inc','icmr_inc'" - -# Write analysis files for early cycle EnKF -export DO_CALC_INCREMENT_ENKF_GFS="YES" - -# Stratospheric increments to zero -export INCVARS_ZERO_STRAT="'sphum_inc','liq_wat_inc','icmr_inc'" -export INCVARS_EFOLD="5" - -# Swith to generate netcdf or binary diagnostic files. If not specified, -# script default to binary diagnostic files. Set diagnostic file -# variables here since used in DA job -export netcdf_diag=".true." -export binary_diag=".false." - -# Verification options -export DO_METP="NO" # Run METPLUS jobs - set METPLUS settings in config.metp; not supported with spack-stack -export DO_FIT2OBS="YES" # Run fit to observations package - -# Archiving options -export HPSSARCH="@HPSSARCH@" # save data to HPSS archive -export LOCALARCH="@LOCALARCH@" # save data to local archive -if [[ ${HPSSARCH} = "YES" ]] && [[ ${LOCALARCH} = "YES" ]]; then - echo "Both HPSS and local archiving selected. Please choose one or the other." 
- exit 2 -fi -export ARCH_CYC=00 # Archive data at this cycle for warm_start capability -export ARCH_WARMICFREQ=4 # Archive frequency in days for warm_start capability -export ARCH_FCSTICFREQ=1 # Archive frequency in days for gdas and gfs forecast-only capability - -#--online archive of nemsio files for fit2obs verification -export FITSARC="YES" -export FHMAX_FITS=132 -[[ "${FHMAX_FITS}" -gt "${FHMAX_GFS}" ]] && export FHMAX_FITS=${FHMAX_GFS} - -echo "END: config.base" diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn new file mode 120000 index 0000000000..6e9cfcec1a --- /dev/null +++ b/parm/config/gfs/config.base.emc.dyn @@ -0,0 +1 @@ +config.base.emc.dyn_hera \ No newline at end of file diff --git a/parm/config/gfs/config.base.emc.dyn_emc b/parm/config/gfs/config.base.emc.dyn_emc index b726c1788a..88a9643ab8 100644 --- a/parm/config/gfs/config.base.emc.dyn_emc +++ b/parm/config/gfs/config.base.emc.dyn_emc @@ -59,17 +59,24 @@ export NOSCRUB="@NOSCRUB@" export BASE_GIT="@BASE_GIT@" # Toggle to turn on/off GFS downstream processing. 
-export DO_GOES="NO" # GOES products +export DO_GOES="@DO_GOES@" # GOES products export DO_BUFRSND="NO" # BUFR sounding products export DO_GEMPAK="NO" # GEMPAK products export DO_AWIPS="NO" # AWIPS products -export DO_NPOESS="NO" # NPOESS products +export DO_NPOESS="NO" # NPOESS products export DO_TRACKER="YES" # Hurricane track verification export DO_GENESIS="YES" # Cyclone genesis verification export DO_GENESIS_FSU="NO" # Cyclone genesis verification (FSU) -export DO_VERFOZN="YES" # Ozone data assimilation monitoring -export DO_VERFRAD="YES" # Radiance data assimilation monitoring -export DO_VMINMON="YES" # GSI minimization monitoring +# The monitor is not yet supported on Hercules +if [[ "${machine}" == "HERCULES" ]]; then + export DO_VERFOZN="NO" # Ozone data assimilation monitoring + export DO_VERFRAD="NO" # Radiance data assimilation monitoring + export DO_VMINMON="NO" # GSI minimization monitoring +else + export DO_VERFOZN="YES" # Ozone data assimilation monitoring + export DO_VERFRAD="YES" # Radiance data assimilation monitoring + export DO_VMINMON="YES" # GSI minimization monitoring +fi export DO_MOS="NO" # GFS Model Output Statistics - Only supported on WCOSS2 # NO for retrospective parallel; YES for real-time parallel @@ -114,7 +121,7 @@ export EXP_WARM_START="@EXP_WARM_START@" export assim_freq=6 export PSLOT="@PSLOT@" export EXPDIR="@EXPDIR@/${PSLOT}" -export ROTDIR="@ROTDIR@/${PSLOT}" +export ROTDIR="@COMROOT@/${PSLOT}" export ROTDIR_DUMP="YES" #Note: A value of "NO" does not currently work export DUMP_SUFFIX="" if [[ "${PDY}${cyc}" -ge "2019092100" && "${PDY}${cyc}" -le "2019110700" ]]; then @@ -170,27 +177,20 @@ export OPS_RES="C768" # Do not change # TODO: Why is this needed and where is it export LEVS=128 export CASE="@CASECTL@" export CASE_ENS="@CASEENS@" -# TODO: This should not depend on $CASE or $CASE_ENS -# These are the currently available grid-combinations +export OCNRES="@OCNRES@" +export ICERES="${OCNRES}" +# These are the currently 
recommended grid-combinations case "${CASE}" in "C48") - export OCNRES=500 export waveGRD='glo_500' ;; - "C96") - export OCNRES=500 - export waveGRD='glo_200' - ;; - "C192") - export OCNRES=050 + "C96" | "C192") export waveGRD='glo_200' ;; "C384") - export OCNRES=025 export waveGRD='glo_025' ;; "C768" | "C1152") - export OCNRES=025 export waveGRD='mx025' ;; *) @@ -198,7 +198,6 @@ case "${CASE}" in exit 1 ;; esac -export ICERES=${OCNRES} case "${APP}" in ATM) diff --git a/parm/config/gfs/config.base.emc.dyn_hera b/parm/config/gfs/config.base.emc.dyn_hera index 2f07dc7970..231b48b0b2 100644 --- a/parm/config/gfs/config.base.emc.dyn_hera +++ b/parm/config/gfs/config.base.emc.dyn_hera @@ -59,7 +59,7 @@ export NOSCRUB="@NOSCRUB@" export BASE_GIT="@BASE_GIT@" # Toggle to turn on/off GFS downstream processing. -export DO_GOES="NO" # GOES products +export DO_GOES="@DO_GOES@" # GOES products export DO_BUFRSND="NO" # BUFR sounding products export DO_GEMPAK="NO" # GEMPAK products export DO_AWIPS="NO" # AWIPS products @@ -67,9 +67,16 @@ export DO_NPOESS="NO" # NPOESS products export DO_TRACKER="NO" # Hurricane track verification ## JKH export DO_GENESIS="NO" # Cyclone genesis verification ## JKH export DO_GENESIS_FSU="NO" # Cyclone genesis verification (FSU) -export DO_VERFOZN="YES" # Ozone data assimilation monitoring -export DO_VERFRAD="YES" # Radiance data assimilation monitoring -export DO_VMINMON="YES" # GSI minimization monitoring +# The monitor is not yet supported on Hercules +if [[ "${machine}" == "HERCULES" ]]; then + export DO_VERFOZN="NO" # Ozone data assimilation monitoring + export DO_VERFRAD="NO" # Radiance data assimilation monitoring + export DO_VMINMON="NO" # GSI minimization monitoring +else + export DO_VERFOZN="YES" # Ozone data assimilation monitoring + export DO_VERFRAD="YES" # Radiance data assimilation monitoring + export DO_VMINMON="YES" # GSI minimization monitoring +fi export DO_MOS="NO" # GFS Model Output Statistics - Only supported on WCOSS2 # NO 
for retrospective parallel; YES for real-time parallel @@ -114,7 +121,7 @@ export EXP_WARM_START="@EXP_WARM_START@" export assim_freq=6 export PSLOT="@PSLOT@" export EXPDIR="@EXPDIR@/${PSLOT}" -export ROTDIR="@ROTDIR@/${PSLOT}" +export ROTDIR="@COMROOT@/${PSLOT}" export ROTDIR_DUMP="YES" #Note: A value of "NO" does not currently work export DUMP_SUFFIX="" if [[ "${PDY}${cyc}" -ge "2019092100" && "${PDY}${cyc}" -le "2019110700" ]]; then @@ -170,27 +177,20 @@ export OPS_RES="C768" # Do not change # TODO: Why is this needed and where is it export LEVS=128 export CASE="@CASECTL@" export CASE_ENS="@CASEENS@" -# TODO: This should not depend on $CASE or $CASE_ENS -# These are the currently available grid-combinations +export OCNRES="@OCNRES@" +export ICERES="${OCNRES}" +# These are the currently recommended grid-combinations case "${CASE}" in "C48") - export OCNRES=500 export waveGRD='glo_500' ;; - "C96") - export OCNRES=500 - export waveGRD='glo_200' - ;; - "C192") - export OCNRES=050 + "C96" | "C192") export waveGRD='glo_200' ;; "C384") - export OCNRES=025 export waveGRD='glo_025' ;; "C768" | "C1152") - export OCNRES=025 export waveGRD='mx025' ;; *) @@ -198,7 +198,6 @@ case "${CASE}" in exit 1 ;; esac -export ICERES=${OCNRES} case "${APP}" in ATM) diff --git a/parm/config/gfs/config.base.emc.dyn_jet b/parm/config/gfs/config.base.emc.dyn_jet index df6498d7b3..be130a79ef 100644 --- a/parm/config/gfs/config.base.emc.dyn_jet +++ b/parm/config/gfs/config.base.emc.dyn_jet @@ -59,17 +59,24 @@ export NOSCRUB="@NOSCRUB@" export BASE_GIT="@BASE_GIT@" # Toggle to turn on/off GFS downstream processing. 
-export DO_GOES="NO" # GOES products +export DO_GOES="@DO_GOES@" # GOES products export DO_BUFRSND="NO" # BUFR sounding products export DO_GEMPAK="NO" # GEMPAK products export DO_AWIPS="NO" # AWIPS products -export DO_NPOESS="NO" # NPOESS products +export DO_NPOESS="NO" # NPOESS products export DO_TRACKER="YES" # Hurricane track verification export DO_GENESIS="NO" # Cyclone genesis verification ## JKH export DO_GENESIS_FSU="NO" # Cyclone genesis verification (FSU) -export DO_VERFOZN="YES" # Ozone data assimilation monitoring -export DO_VERFRAD="YES" # Radiance data assimilation monitoring -export DO_VMINMON="YES" # GSI minimization monitoring +# The monitor is not yet supported on Hercules +if [[ "${machine}" == "HERCULES" ]]; then + export DO_VERFOZN="NO" # Ozone data assimilation monitoring + export DO_VERFRAD="NO" # Radiance data assimilation monitoring + export DO_VMINMON="NO" # GSI minimization monitoring +else + export DO_VERFOZN="YES" # Ozone data assimilation monitoring + export DO_VERFRAD="YES" # Radiance data assimilation monitoring + export DO_VMINMON="YES" # GSI minimization monitoring +fi export DO_MOS="NO" # GFS Model Output Statistics - Only supported on WCOSS2 # NO for retrospective parallel; YES for real-time parallel @@ -114,7 +121,7 @@ export EXP_WARM_START="@EXP_WARM_START@" export assim_freq=6 export PSLOT="@PSLOT@" export EXPDIR="@EXPDIR@/${PSLOT}" -export ROTDIR="@ROTDIR@/${PSLOT}" +export ROTDIR="@COMROOT@/${PSLOT}" export ROTDIR_DUMP="YES" #Note: A value of "NO" does not currently work export DUMP_SUFFIX="" if [[ "${PDY}${cyc}" -ge "2019092100" && "${PDY}${cyc}" -le "2019110700" ]]; then @@ -170,27 +177,20 @@ export OPS_RES="C768" # Do not change # TODO: Why is this needed and where is it export LEVS=128 export CASE="@CASECTL@" export CASE_ENS="@CASEENS@" -# TODO: This should not depend on $CASE or $CASE_ENS -# These are the currently available grid-combinations +export OCNRES="@OCNRES@" +export ICERES="${OCNRES}" +# These are the currently 
recommended grid-combinations case "${CASE}" in "C48") - export OCNRES=500 export waveGRD='glo_500' ;; - "C96") - export OCNRES=500 - export waveGRD='glo_200' - ;; - "C192") - export OCNRES=050 + "C96" | "C192") export waveGRD='glo_200' ;; "C384") - export OCNRES=025 export waveGRD='glo_025' ;; "C768" | "C1152") - export OCNRES=025 export waveGRD='mx025' ;; *) @@ -198,7 +198,6 @@ case "${CASE}" in exit 1 ;; esac -export ICERES=${OCNRES} case "${APP}" in ATM) diff --git a/parm/config/gfs/config.com b/parm/config/gfs/config.com index 208b0ac096..db648b5866 100644 --- a/parm/config/gfs/config.com +++ b/parm/config/gfs/config.com @@ -31,7 +31,7 @@ echo "BEGIN: config.com" # # -# If any restart, input, or analysis template is updated, `setup_expt.py.fill_COMROT_cycled()` +# If any restart, input, or analysis template is updated, `setup_expt.py.fill_ROTDIR_cycled()` # must correspondingly be updated to match. # if [[ "${RUN_ENVIR:-emc}" == "nco" ]]; then diff --git a/parm/config/gfs/config.fcst b/parm/config/gfs/config.fcst index 33237fb6c7..a4c4ee8072 100644 --- a/parm/config/gfs/config.fcst +++ b/parm/config/gfs/config.fcst @@ -119,7 +119,7 @@ fi # PBL/turbulence schemes export hybedmf=".false." -if [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_c3_mynn" ]] ; then +if [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_c3_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_c3_mynn" ]] ; then export satmedmf=".false." export isatmedmf=0 export shal_cnv=".false." @@ -163,7 +163,7 @@ export iopt_trs="2" # Convection Options: 2-SASAS, 3-GF export progsigma=".true." 
-if [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_c3_mynn" ]] ; then +if [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_c3_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_c3_mynn" ]] ; then export imfdeepcnv=5 export imfshalcnv=-1 ## JKH - no shallow GF elif [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_c3" ]] ; then @@ -211,25 +211,27 @@ case ${imp_physics} in export ncld=2 export nwat=6 - if [[ "$CCPP_SUITE" == "FV3_GFS_v17_p8_ugwpv1_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_mynn" || "$CCPP_SUITE" == "FV3_GFS_v17_p8_c3_mynn" || - "$CCPP_SUITE" == "FV3_GFS_v17_p8_thompson" ]] ; then - export ltaerosol=".true." - export FIELD_TABLE="$HOMEgfs/parm/ufs/fv3/field_table_thompson_aero_tke${tbp}" - else - export ltaerosol=".false." - export FIELD_TABLE="${HOMEgfs}/parm/ufs/fv3/field_table_thompson_noaero_tke${tbp}" - fi - export cal_pre=".false." export random_clds=".false." export effr_in=".true." export lradar=".true." export ttendlim="-999" - export dt_inner=$((DELTIM/2)) export sedi_semi=.true. - if [[ "${sedi_semi}" == .true. ]]; then export dt_inner=${DELTIM} ; fi export decfl=10 + if [[ "${CCPP_SUITE}" == "FV3_GFS_v17_p8_ugwpv1_mynn" || "${CCPP_SUITE}" == "FV3_GFS_v17_p8_ugwpv1_c3_mynn" || "${CCPP_SUITE}" == "FV3_GFS_v17_p8_mynn" || "${CCPP_SUITE}" == "FV3_GFS_v17_p8_c3_mynn" || + "${CCPP_SUITE}" == "FV3_GFS_v17_p8_thompson" ]] ; then + #JKH keep dt_inner $DELTIM/2 (75) if running aerosol-aware Thompson + export dt_inner=$((DELTIM/2)) + export ltaerosol=".true." + export FIELD_TABLE="${HOMEgfs}/parm/ufs/fv3/field_table_thompson_aero_tke${tbp}" + else + export dt_inner=$((DELTIM/2)) + if [[ "${sedi_semi}" == .true. ]]; then export dt_inner=${DELTIM} ; fi + export ltaerosol=".false." 
+ export FIELD_TABLE="${HOMEgfs}/parm/ufs/fv3/field_table_thompson_noaero_tke${tbp}" + fi + export hord_mt_nh_nonmono=5 export hord_xx_nh_nonmono=5 export vtdm4_nh_nonmono=0.02 diff --git a/parm/config/gfs/config.ocnanal b/parm/config/gfs/config.ocnanal index ec45ddd288..38a6cbd52a 100644 --- a/parm/config/gfs/config.ocnanal +++ b/parm/config/gfs/config.ocnanal @@ -17,11 +17,6 @@ export CASE_ANL=@CASE_ANL@ export DOMAIN_STACK_SIZE=116640000 #TODO: Make the stack size resolution dependent export JEDI_BIN=${HOMEgfs}/sorc/gdas.cd/build/bin -# R2D2 -export R2D2_OBS_DB=shared -export R2D2_OBS_DUMP=@R2D2_OBS_DUMP@ -export R2D2_OBS_SRC=@R2D2_OBS_SRC@ -export R2D2_OBS_WINDOW=24 # TODO: Check if the R2D2 sampling DB window is still needed export COMIN_OBS=@COMIN_OBS@ # NICAS diff --git a/parm/config/gfs/config.prepoceanobs b/parm/config/gfs/config.prepoceanobs index 068ecff1ad..d7c4e37bb9 100644 --- a/parm/config/gfs/config.prepoceanobs +++ b/parm/config/gfs/config.prepoceanobs @@ -7,11 +7,14 @@ echo "BEGIN: config.prepoceanobs" export OCNOBS2IODAEXEC=${HOMEgfs}/sorc/gdas.cd/build/bin/gdas_obsprovider2ioda.x export OBS_YAML_DIR=${HOMEgfs}/sorc/gdas.cd/parm/soca/obs/config -export OBSPROC_YAML=@OBSPROC_CONFIG@ +export OBSPROC_YAML=@OBSPROC_YAML@ export OBS_LIST=@SOCA_OBS_LIST@ [[ -n "${OBS_LIST}" ]] || export OBS_LIST=${HOMEgfs}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml export OBS_YAML=${OBS_LIST} +# ocean analysis needs own dmpdir until standard dmpdir has full ocean obs +export DMPDIR=@DMPDIR@ + # Get task specific resources . 
"${EXPDIR}/config.resources" prepoceanobs echo "END: config.prepoceanobs" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 5d8540d7a3..c179c33df4 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -58,7 +58,7 @@ elif [[ "${machine}" = "AWSPW" ]]; then elif [[ "${machine}" = "ORION" ]]; then export npe_node_max=40 elif [[ "${machine}" = "HERCULES" ]]; then - export npe_node_max=40 + export npe_node_max=80 fi if [[ "${step}" = "prep" ]]; then @@ -907,13 +907,19 @@ elif [[ "${step}" = "eobs" || "${step}" = "eomg" ]]; then export nth_eomg=${nth_eobs} npe_node_eobs=$(echo "${npe_node_max} / ${nth_eobs}" | bc) export npe_node_eobs - export npe_node_eomg=${npe_node_eobs} export is_exclusive=True - #The number of tasks and cores used must be the same for eobs - #For S4, this is accomplished by running 10 tasks/node - if [[ "${machine}" = "S4" ]]; then + # The number of tasks and cores used must be the same for eobs + # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details + # For S4, this is accomplished by running 10 tasks/node + if [[ ${machine} = "S4" ]]; then export npe_node_eobs=10 + elif [[ ${machine} = "HERCULES" ]]; then + # For Hercules, this is only an issue at C384; use 20 tasks/node + if [[ ${CASE} = "C384" ]]; then + export npe_node_eobs=20 + fi fi + export npe_node_eomg=${npe_node_eobs} elif [[ "${step}" = "ediag" ]]; then diff --git a/parm/config/gfs/config.ufs_c768_12x12_2th_1wg40wt b/parm/config/gfs/config.ufs_c768_12x12_2th_1wg40wt index 5fbc6bc651..3aee6a3aa1 100644 --- a/parm/config/gfs/config.ufs_c768_12x12_2th_1wg40wt +++ b/parm/config/gfs/config.ufs_c768_12x12_2th_1wg40wt @@ -72,9 +72,12 @@ case "${machine}" in "WCOSS2") npe_node_max=128 ;; - "HERA" | "ORION" | "HERCULES") + "HERA" | "ORION" ) npe_node_max=40 ;; + "HERCULES" ) + npe_node_max=80 + ;; "JET") case "${PARTITION_BATCH}" in "xjet") @@ -276,7 +279,7 @@ export cplice=".false." export cplchm=".false." 
export cplwav=".false." export cplwav2atm=".false." -export CCPP_SUITE="FV3_GFS_v17_p8_ugwpv1_c3" +export CCPP_SUITE="FV3_GFS_v17_p8_ugwpv1_c3_mynn" model_list="atm" # Mediator specific settings diff --git a/parm/config/gfs/config.ufs_c768_16x16_2th_2wg40wt b/parm/config/gfs/config.ufs_c768_16x16_2th_2wg40wt index ab41ade0ca..589358a89e 100644 --- a/parm/config/gfs/config.ufs_c768_16x16_2th_2wg40wt +++ b/parm/config/gfs/config.ufs_c768_16x16_2th_2wg40wt @@ -72,9 +72,12 @@ case "${machine}" in "WCOSS2") npe_node_max=128 ;; - "HERA" | "ORION" | "HERCULES") + "HERA" | "ORION" ) npe_node_max=40 ;; + "HERCULES" ) + npe_node_max=80 + ;; "JET") case "${PARTITION_BATCH}" in "xjet") @@ -276,7 +279,7 @@ export cplice=".false." export cplchm=".false." export cplwav=".false." export cplwav2atm=".false." -export CCPP_SUITE="FV3_GFS_v17_p8_ugwpv1_c3" +export CCPP_SUITE="FV3_GFS_v17_p8_ugwpv1_c3_mynn" model_list="atm" # Mediator specific settings diff --git a/parm/config/gfs/yaml/defaults.yaml b/parm/config/gfs/yaml/defaults.yaml index c0298edb18..ade83fa484 100644 --- a/parm/config/gfs/yaml/defaults.yaml +++ b/parm/config/gfs/yaml/defaults.yaml @@ -5,6 +5,7 @@ base: DO_JEDIOCNVAR: "NO" DO_JEDILANDDA: "NO" DO_MERGENSST: "NO" + DO_GOES: "NO" atmanl: IO_LAYOUT_X: 1 diff --git a/scripts/exglobal_archive_emc.sh b/scripts/exglobal_archive_emc.sh index 18217f4efc..2f7e3be972 100755 --- a/scripts/exglobal_archive_emc.sh +++ b/scripts/exglobal_archive_emc.sh @@ -33,7 +33,9 @@ source "${HOMEgfs}/ush/file_utils.sh" [[ ! 
-d ${ARCDIR} ]] && mkdir -p "${ARCDIR}" nb_copy "${COM_ATMOS_ANALYSIS}/${APREFIX}gsistat" "${ARCDIR}/gsistat.${RUN}.${PDY}${cyc}" -nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}" +if [[ ${DO_AERO} = "YES" ]]; then + nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}" +fi nb_copy "${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.anl" "${ARCDIR}/pgbanl.${RUN}.${PDY}${cyc}.grib2" # Archive 1 degree forecast GRIB2 files for verification diff --git a/scripts/exglobal_archive_gsl.sh b/scripts/exglobal_archive_gsl.sh index 7787073f37..b84fe345c2 100755 --- a/scripts/exglobal_archive_gsl.sh +++ b/scripts/exglobal_archive_gsl.sh @@ -9,7 +9,7 @@ source "${HOMEgfs}/ush/preamble.sh" # ICS are restarts and always lag INC by $assim_freq hours ARCHINC_CYC=${ARCH_CYC} ARCHICS_CYC=$((ARCH_CYC-assim_freq)) -if [ "${ARCHICS_CYC}" -lt 0 ]; then +if [[ "${ARCHICS_CYC}" -lt 0 ]]; then ARCHICS_CYC=$((ARCHICS_CYC+24)) fi @@ -21,7 +21,7 @@ APREFIX="${RUN}.t${cyc}z." # Ignore possible spelling error (nothing is misspelled) # shellcheck disable=SC2153 CDATE_MOS=${PDY}${cyc} -if [ "${REALTIME}" = "YES" ]; then +if [[ "${REALTIME}" = "YES" ]]; then CDATE_MOS=$(${NDATE} -24 "${PDY}${cyc}") fi PDY_MOS="${CDATE_MOS:0:8}" @@ -33,7 +33,9 @@ PDY_MOS="${CDATE_MOS:0:8}" #JKH #JKH[[ ! 
-d ${ARCDIR} ]] && mkdir -p "${ARCDIR}" #JKHnb_copy "${COM_ATMOS_ANALYSIS}/${APREFIX}gsistat" "${ARCDIR}/gsistat.${RUN}.${PDY}${cyc}" -#JKHnb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}" +#JKHif [[ ${DO_AERO} = "YES" ]]; then +#JKH nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}" +#JKHfi #JKHnb_copy "${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.anl" "${ARCDIR}/pgbanl.${RUN}.${PDY}${cyc}.grib2" #JKH #JKH# Archive 1 degree forecast GRIB2 files for verification @@ -158,7 +160,7 @@ if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then "${HOMEgfs}/ush/hpssarch_gen.sh" "${RUN}" status=$? - if [ "${status}" -ne 0 ]; then + if [[ "${status}" -ne 0 ]]; then echo "${HOMEgfs}/ush/hpssarch_gen.sh ${RUN} failed, ABORT!" exit "${status}" fi @@ -169,12 +171,12 @@ if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then targrp_list="gfs_pgrb2" - if [ "${ARCH_GAUSSIAN:-"NO"}" = "YES" ]; then + if [[ "${ARCH_GAUSSIAN:-"NO"}" = "YES" ]]; then targrp_list="${targrp_list} gfs_nc" fi #for initial conditions - if [ "${SAVEFCSTIC}" = "YES" ]; then + if [[ "${SAVEFCSTIC}" = "YES" ]]; then targrp_list="${targrp_list} gfs_ics" fi @@ -216,7 +218,7 @@ if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then stat_chgrp=$? ${HSICMD} chmod 640 "${tar_fl}" stat_chgrp=$((stat_chgrp+$?)) - if [ "${stat_chgrp}" -gt 0 ]; then + if [[ "${stat_chgrp}" -gt 0 ]]; then echo "FATAL ERROR: Unable to properly restrict ${tar_fl}!" echo "Attempting to delete ${tar_fl}" ${HSICMD} rm "${tar_fl}" @@ -239,4 +241,3 @@ if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then fi ##end of HPSS archive ############################################################### -exit 0 diff --git a/scripts/exglobal_archive_gsl_nonc.sh b/scripts/exglobal_archive_gsl_nonc.sh new file mode 100755 index 0000000000..ed419f652a --- /dev/null +++ b/scripts/exglobal_archive_gsl_nonc.sh @@ -0,0 +1,243 @@ +#! 
/usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +############################################## +# Begin JOB SPECIFIC work +############################################## + +# ICS are restarts and always lag INC by $assim_freq hours +ARCHINC_CYC=${ARCH_CYC} +ARCHICS_CYC=$((ARCH_CYC-assim_freq)) +if [[ "${ARCHICS_CYC}" -lt 0 ]]; then + ARCHICS_CYC=$((ARCHICS_CYC+24)) +fi + +# CURRENT CYCLE +APREFIX="${RUN}.t${cyc}z." + +# Realtime parallels run GFS MOS on 1 day delay +# If realtime parallel, back up CDATE_MOS one day +# Ignore possible spelling error (nothing is misspelled) +# shellcheck disable=SC2153 +CDATE_MOS=${PDY}${cyc} +if [[ "${REALTIME}" = "YES" ]]; then + CDATE_MOS=$(${NDATE} -24 "${PDY}${cyc}") +fi +PDY_MOS="${CDATE_MOS:0:8}" + +############################################################### +# Archive online for verification and diagnostics +############################################################### +#JKHsource "${HOMEgfs}/ush/file_utils.sh" +#JKH +#JKH[[ ! -d ${ARCDIR} ]] && mkdir -p "${ARCDIR}" +#JKHnb_copy "${COM_ATMOS_ANALYSIS}/${APREFIX}gsistat" "${ARCDIR}/gsistat.${RUN}.${PDY}${cyc}" +#JKHif [[ ${DO_AERO} = "YES" ]]; then +#JKH nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}" +#JKHfi +#JKHnb_copy "${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.anl" "${ARCDIR}/pgbanl.${RUN}.${PDY}${cyc}.grib2" +#JKH +#JKH# Archive 1 degree forecast GRIB2 files for verification +#JKHif [[ "${RUN}" == "gfs" ]]; then +#JKH fhmax=${FHMAX_GFS} +#JKH fhr=0 +#JKH while [ "${fhr}" -le "${fhmax}" ]; do +#JKH fhr2=$(printf %02i "${fhr}") +#JKH fhr3=$(printf %03i "${fhr}") +#JKH nb_copy "${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.f${fhr3}" "${ARCDIR}/pgbf${fhr2}.${RUN}.${PDY}${cyc}.grib2" +#JKH fhr=$((10#${fhr} + 10#${FHOUT_GFS} )) +#JKH done +#JKHfi +#JKHif [[ "${RUN}" == "gdas" ]]; then +#JKH flist="000 003 006 009" +#JKH for fhr in ${flist}; do +#JKH fname="${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.f${fhr}" +#JKH # 
TODO Shouldn't the archived files also use three-digit tags? +#JKH fhr2=$(printf %02i $((10#${fhr}))) +#JKH nb_copy "${fname}" "${ARCDIR}/pgbf${fhr2}.${RUN}.${PDY}${cyc}.grib2" +#JKH done +#JKHfi +#JKH +#JKHif [[ -s "${COM_ATMOS_TRACK}/avno.t${cyc}z.cyclone.trackatcfunix" ]]; then +#JKH # shellcheck disable=2153 +#JKH PSLOT4=${PSLOT:0:4} +#JKH # shellcheck disable= +#JKH PSLOT4=${PSLOT4^^} +#JKH sed "s:AVNO:${PSLOT4}:g" < "${COM_ATMOS_TRACK}/avno.t${cyc}z.cyclone.trackatcfunix" \ +#JKH > "${ARCDIR}/atcfunix.${RUN}.${PDY}${cyc}" +#JKH sed "s:AVNO:${PSLOT4}:g" < "${COM_ATMOS_TRACK}/avnop.t${cyc}z.cyclone.trackatcfunix" \ +#JKH > "${ARCDIR}/atcfunixp.${RUN}.${PDY}${cyc}" +#JKHfi +#JKH +#JKHif [[ "${RUN}" == "gdas" ]] && [[ -s "${COM_ATMOS_TRACK}/gdas.t${cyc}z.cyclone.trackatcfunix" ]]; then +#JKH # shellcheck disable=2153 +#JKH PSLOT4=${PSLOT:0:4} +#JKH # shellcheck disable= +#JKH PSLOT4=${PSLOT4^^} +#JKH sed "s:AVNO:${PSLOT4}:g" < "${COM_ATMOS_TRACK}/gdas.t${cyc}z.cyclone.trackatcfunix" \ +#JKH > "${ARCDIR}/atcfunix.${RUN}.${PDY}${cyc}" +#JKH sed "s:AVNO:${PSLOT4}:g" < "${COM_ATMOS_TRACK}/gdasp.t${cyc}z.cyclone.trackatcfunix" \ +#JKH > "${ARCDIR}/atcfunixp.${RUN}.${PDY}${cyc}" +#JKHfi +#JKH +#JKHif [ "${RUN}" = "gfs" ]; then +#JKH nb_copy "${COM_ATMOS_GENESIS}/storms.gfso.atcf_gen.${PDY}${cyc}" "${ARCDIR}/." +#JKH nb_copy "${COM_ATMOS_GENESIS}/storms.gfso.atcf_gen.altg.${PDY}${cyc}" "${ARCDIR}/." +#JKH nb_copy "${COM_ATMOS_TRACK}/trak.gfso.atcfunix.${PDY}${cyc}" "${ARCDIR}/." +#JKH nb_copy "${COM_ATMOS_TRACK}/trak.gfso.atcfunix.altg.${PDY}${cyc}" "${ARCDIR}/." 
+#JKH +#JKH mkdir -p "${ARCDIR}/tracker.${PDY}${cyc}/${RUN}" +#JKH blist="epac natl" +#JKH for basin in ${blist}; do +#JKH if [[ -f ${basin} ]]; then +#JKH cp -rp "${COM_ATMOS_TRACK}/${basin}" "${ARCDIR}/tracker.${PDY}${cyc}/${RUN}" +#JKH fi +#JKH done +#JKHfi +#JKH +#JKH# Archive required gaussian gfs forecast files for Fit2Obs +#JKHif [[ "${RUN}" == "gfs" ]] && [[ "${FITSARC}" = "YES" ]]; then +#JKH VFYARC=${VFYARC:-${ROTDIR}/vrfyarch} +#JKH [[ ! -d ${VFYARC} ]] && mkdir -p "${VFYARC}" +#JKH mkdir -p "${VFYARC}/${RUN}.${PDY}/${cyc}" +#JKH prefix="${RUN}.t${cyc}z" +#JKH fhmax=${FHMAX_FITS:-${FHMAX_GFS}} +#JKH fhr=0 +#JKH while [[ ${fhr} -le ${fhmax} ]]; do +#JKH fhr3=$(printf %03i "${fhr}") +#JKH sfcfile="${COM_ATMOS_HISTORY}/${prefix}.sfcf${fhr3}.nc" +#JKH sigfile="${COM_ATMOS_HISTORY}/${prefix}.atmf${fhr3}.nc" +#JKH nb_copy "${sfcfile}" "${VFYARC}/${RUN}.${PDY}/${cyc}/" +#JKH nb_copy "${sigfile}" "${VFYARC}/${RUN}.${PDY}/${cyc}/" +#JKH (( fhr = 10#${fhr} + 6 )) +#JKH done +#JKHfi + + +############################################################### +# Archive data either to HPSS or locally +if [[ ${HPSSARCH} = "YES" || ${LOCALARCH} = "YES" ]]; then +############################################################### + + # --set the archiving command and create local directories, if necessary + TARCMD="htar" + HSICMD="hsi" + if [[ ${LOCALARCH} = "YES" ]]; then + TARCMD="tar" + HSICMD='' + [[ ! -d "${ATARDIR}/${PDY}${cyc}" ]] && mkdir -p "${ATARDIR}/${PDY}${cyc}" + [[ ! 
-d "${ATARDIR}/${CDATE_MOS}" ]] && [[ -d "${ROTDIR}/gfsmos.${PDY_MOS}" ]] && [[ "${cyc}" -eq 18 ]] && mkdir -p "${ATARDIR}/${CDATE_MOS}" + fi + + #--determine when to save ICs for warm start and forecast-only runs + SAVEWARMICA="NO" + SAVEWARMICB="NO" + SAVEFCSTIC="NO" + firstday=$(${NDATE} +24 "${SDATE}") + mm="${PDY:2:2}" + dd="${PDY:4:2}" + # TODO: This math yields multiple dates sharing the same nday + nday=$(( (10#${mm}-1)*30+10#${dd} )) + mod=$((nday % ARCH_WARMICFREQ)) + if [[ "${PDY}${cyc}" -eq "${firstday}" ]] && [[ "${cyc}" -eq "${ARCHINC_CYC}" ]]; then SAVEWARMICA="YES" ; fi + if [[ "${PDY}${cyc}" -eq "${firstday}" ]] && [[ "${cyc}" -eq "${ARCHICS_CYC}" ]]; then SAVEWARMICB="YES" ; fi + if [[ "${mod}" -eq 0 ]] && [[ "${cyc}" -eq "${ARCHINC_CYC}" ]]; then SAVEWARMICA="YES" ; fi + if [[ "${mod}" -eq 0 ]] && [[ "${cyc}" -eq "${ARCHICS_CYC}" ]]; then SAVEWARMICB="YES" ; fi + + if [[ "${ARCHICS_CYC}" -eq 18 ]]; then + nday1=$((nday+1)) + mod1=$((nday1 % ARCH_WARMICFREQ)) + if [[ "${mod1}" -eq 0 ]] && [[ "${cyc}" -eq "${ARCHICS_CYC}" ]] ; then SAVEWARMICB="YES" ; fi + if [[ "${mod1}" -ne 0 ]] && [[ "${cyc}" -eq "${ARCHICS_CYC}" ]] ; then SAVEWARMICB="NO" ; fi + if [[ "${PDY}${cyc}" -eq "${SDATE}" ]] && [[ "${cyc}" -eq "${ARCHICS_CYC}" ]] ; then SAVEWARMICB="YES" ; fi + fi + + mod=$((nday % ARCH_FCSTICFREQ)) + if [[ "${mod}" -eq 0 ]] || [[ "${PDY}${cyc}" -eq "${firstday}" ]]; then SAVEFCSTIC="YES" ; fi + + cd "${DATA}" || exit 2 + + "${HOMEgfs}/ush/hpssarch_gen.sh" "${RUN}" + status=$? + if [[ "${status}" -ne 0 ]]; then + echo "${HOMEgfs}/ush/hpssarch_gen.sh ${RUN} failed, ABORT!" 
+ exit "${status}" + fi + + cd "${ROTDIR}" || exit 2 + + if [[ "${RUN}" = "gfs" ]]; then + + targrp_list="gfs_pgrb2" + +#JKH if [ "${ARCH_GAUSSIAN:-"NO"}" = "YES" ]; then +#JKH targrp_list="${targrp_list} gfs_nc" +#JKH fi + + #for initial conditions + if [[ "${SAVEFCSTIC}" = "YES" ]]; then + targrp_list="${targrp_list} gfs_ics" + fi + + fi + + # Turn on extended globbing options + yyyy="${PDY:0:4}" + shopt -s extglob + for targrp in ${targrp_list}; do + set +e + + # Test whether gdas.tar or gdas_restarta.tar will have rstprod data + has_rstprod="NO" + case ${targrp} in + 'gdas'|'gdas_restarta') + # Test for rstprod in each archived file + while IFS= read -r file; do + if [[ -f ${file} ]]; then + group=$( stat -c "%G" "${file}" ) + if [[ "${group}" == "rstprod" ]]; then + has_rstprod="YES" + break + fi + fi + done < "${DATA}/${targrp}.txt" + + ;; + *) ;; + esac + + # Create the tarball + tar_fl="${ATARDIR}/${yyyy}/${PDY}${cyc}/${targrp}.tar" + ${TARCMD} -P -cvf "${tar_fl}" $(cat "${DATA}/${targrp}.txt") + status=$? + + # Change group to rstprod if it was found even if htar/tar failed in case of partial creation + if [[ "${has_rstprod}" == "YES" ]]; then + ${HSICMD} chgrp rstprod "${tar_fl}" + stat_chgrp=$? + ${HSICMD} chmod 640 "${tar_fl}" + stat_chgrp=$((stat_chgrp+$?)) + if [[ "${stat_chgrp}" -gt 0 ]]; then + echo "FATAL ERROR: Unable to properly restrict ${tar_fl}!" + echo "Attempting to delete ${tar_fl}" + ${HSICMD} rm "${tar_fl}" + echo "Please verify that ${tar_fl} was deleted!" 
+ exit "${stat_chgrp}" + fi + fi + + # For safety, test if the htar/tar command failed after changing groups + if [[ "${status}" -ne 0 ]] && [[ "${PDY}${cyc}" -ge "${firstday}" ]]; then + echo "FATAL ERROR: ${TARCMD} ${tar_fl} failed" + exit "${status}" + fi + set_strict + done + # Turn extended globbing back off + shopt -u extglob + +############################################################### +fi ##end of HPSS archive +############################################################### + diff --git a/scripts/exglobal_atmos_pmgr.sh b/scripts/exglobal_atmos_pmgr.sh index 6e4c2ed3f4..86afed962e 100755 --- a/scripts/exglobal_atmos_pmgr.sh +++ b/scripts/exglobal_atmos_pmgr.sh @@ -43,7 +43,7 @@ icnt=1 while [ $icnt -lt 1000 ]; do for fhr in $postjobs; do fhr3=$(printf "%03d" $fhr) - if [ -s ${COMIN}/${RUN}.${cycle}.logf${fhr}.txt -o -s ${COMIN}/${RUN}.${cycle}.logf${fhr3}.txt ]; then + if [ -s ${COM_ATMOS_HISTORY}/${RUN}.${cycle}.atm.logf${fhr3}.txt ]; then if [ $fhr -eq 0 ]; then #### ecflow_client --event release_${RUN}_postanl ecflow_client --event release_postanl diff --git a/scripts/exglobal_atmos_products.sh b/scripts/exglobal_atmos_products.sh index d2c0ed7466..5f0b1db6cf 100755 --- a/scripts/exglobal_atmos_products.sh +++ b/scripts/exglobal_atmos_products.sh @@ -129,7 +129,7 @@ for (( nset=1 ; nset <= downset ; nset++ )); do # Run with MPMD or serial if [[ "${USE_CFP:-}" = "YES" ]]; then - "${HOMEgfs}/ush/run_mpmd.sh" "${DATA}/poescript" + OMP_NUM_THREADS=1 "${HOMEgfs}/ush/run_mpmd.sh" "${DATA}/poescript" export err=$? else chmod 755 "${DATA}/poescript" diff --git a/scripts/exglobal_forecast.sh b/scripts/exglobal_forecast.sh index c629173879..c50cde74f1 100755 --- a/scripts/exglobal_forecast.sh +++ b/scripts/exglobal_forecast.sh @@ -158,7 +158,7 @@ FV3_out [[ ${cplice} = .true. ]] && CICE_out [[ ${cplchm} = .true. ]] && GOCART_out [[ ${esmf_profile:-} = .true. 
]] && CPL_out -echo "MAIN: Output copied to COMROT" +echo "MAIN: Output copied to ROTDIR" #------------------------------------------------------------------ diff --git a/sorc/build_all.sh b/sorc/build_all.sh index ccc088acd9..23cf420f1d 100755 --- a/sorc/build_all.sh +++ b/sorc/build_all.sh @@ -129,20 +129,27 @@ build_opts["ww3prepost"]="${_verbose_opt} ${_build_ufs_opt}" # Optional DA builds if [[ "${_build_ufsda}" == "YES" ]]; then - build_jobs["gdas"]=8 - big_jobs=$((big_jobs+1)) - build_opts["gdas"]="${_verbose_opt}" + if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" ]]; then + echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build." + else + build_jobs["gdas"]=8 + big_jobs=$((big_jobs+1)) + build_opts["gdas"]="${_verbose_opt}" + fi fi if [[ "${_build_gsi}" == "YES" ]]; then build_jobs["gsi_enkf"]=8 - big_jobs=$((big_jobs+1)) build_opts["gsi_enkf"]="${_verbose_opt}" fi if [[ "${_build_gsi}" == "YES" || "${_build_ufsda}" == "YES" ]] ; then build_jobs["gsi_utils"]=2 build_opts["gsi_utils"]="${_verbose_opt}" - build_jobs["gsi_monitor"]=1 - build_opts["gsi_monitor"]="${_verbose_opt}" + if [[ "${MACHINE_ID}" == "hercules" ]]; then + echo "NOTE: The GSI Monitor is not supported on Hercules. Disabling build." 
+ else + build_jobs["gsi_monitor"]=1 + build_opts["gsi_monitor"]="${_verbose_opt}" + fi fi # Go through all builds and adjust CPU counts down if necessary @@ -168,7 +175,7 @@ if [[ ${requested_cpus} -lt ${_build_job_max} && ${big_jobs} -gt 0 ]]; then extra_cores=$(( _build_job_max - requested_cpus )) extra_cores=$(( extra_cores / big_jobs )) for build in "${!build_jobs[@]}"; do - if [[ "${build}" == "gdas" || "${build}" == "ufs" || "${build}" == "gsi_enkf" ]]; then + if [[ "${build}" == "gdas" || "${build}" == "ufs" ]]; then build_jobs[${build}]=$(( build_jobs[${build}] + extra_cores )) fi done diff --git a/sorc/build_ufs.sh b/sorc/build_ufs.sh index 1907171223..bf6a783039 100755 --- a/sorc/build_ufs.sh +++ b/sorc/build_ufs.sh @@ -5,7 +5,7 @@ cwd=$(pwd) # Default settings APP="S2SWA" -CCPP_SUITES="FV3_GFS_v17_p8_ugwpv1,FV3_GFS_v17_coupled_p8_ugwpv1,FV3_GFS_v17_p8_ugwpv1_mynn,FV3_GFS_v17_p8_ugwpv1_c3" # TODO: does the g-w need to build with all these CCPP_SUITES? +CCPP_SUITES="FV3_GFS_v17_p8_ugwpv1,FV3_GFS_v17_coupled_p8_ugwpv1,FV3_GFS_v17_p8_ugwpv1_c3_mynn,FV3_GFS_v17_p8_ugwpv1_mynn,FV3_GFS_v17_p8_ugwpv1_c3" # TODO: does the g-w need to build with all these CCPP_SUITES? while getopts ":da:j:v" option; do case "${option}" in diff --git a/sorc/gsi_utils.fd b/sorc/gsi_utils.fd index f371890b9f..90481d9618 160000 --- a/sorc/gsi_utils.fd +++ b/sorc/gsi_utils.fd @@ -1 +1 @@ -Subproject commit f371890b9fcb42312da5f6228d87b5a4829e7e3a +Subproject commit 90481d961854e4412ecac49991721e6e63d4b82e diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 1bdb4dd492..ed33b17e72 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -133,7 +133,7 @@ fi #--------------------------------------- #--copy/link NoahMp table form ccpp-physics repository cd "${HOMEgfs}/parm/ufs" || exit 1 -${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_model.fd/FV3/ccpp/physics/physics/noahmptable.tbl" . +${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_model.fd/tests/parm/noahmptable.tbl" . 
cd "${HOMEgfs}/parm/post" || exit 1 for file in postxconfig-NT-GEFS-ANL.txt postxconfig-NT-GEFS-F00.txt postxconfig-NT-GEFS.txt postxconfig-NT-GFS-ANL.txt \ diff --git a/sorc/ufs_model.fd b/sorc/ufs_model.fd index 991d6527da..4679220223 160000 --- a/sorc/ufs_model.fd +++ b/sorc/ufs_model.fd @@ -1 +1 @@ -Subproject commit 991d6527da22d11016df035998ec1352d0449875 +Subproject commit 4679220223dbb4601b15e0bdb7b39c0f444bf963 diff --git a/sorc/ufs_utils.fd b/sorc/ufs_utils.fd index ce385cedfa..d805336652 160000 --- a/sorc/ufs_utils.fd +++ b/sorc/ufs_utils.fd @@ -1 +1 @@ -Subproject commit ce385cedfa9abd46b0905e8d6486b0339a9e4267 +Subproject commit d80533665273705b2041c494ed3c1bd50f263265 diff --git a/test/README.md b/test/README.md index 8d9d273ce2..f751d4db5c 100644 --- a/test/README.md +++ b/test/README.md @@ -24,11 +24,11 @@ Where `dirA` and `dirB` are the two cycle directories (`.../gfs.YYYYMMDD/HH/`) OR ``` -./diff_ROTDIR.sh rotdir cdate expA expB +./diff_ROTDIR.sh comroot cdate expA expB ``` Where: -- `rotdir` is the root of your rotdirs (the portion of path the experiments share) +- `comroot` is the root of your rotdirs (the portion of path the experiments share) - `cdate` is the datetime of the cycle in YYYMMDDHH format - `expA` and `expB` are the experiment names ($PSLOT) of each experiment diff --git a/ush/hpssarch_gen_emc.sh b/ush/hpssarch_gen_emc.sh index 0a027c7537..c34fff1a84 100755 --- a/ush/hpssarch_gen_emc.sh +++ b/ush/hpssarch_gen_emc.sh @@ -355,8 +355,10 @@ if [[ ${type} == "gdas" ]]; then if [[ -s "${COM_ATMOS_ANALYSIS}/${head}oznstat" ]]; then echo "${COM_ATMOS_ANALYSIS/${ROTDIR}\//}/${head}oznstat" fi - if [[ -s "${COM_CHEM_ANALYSIS}/${head}aerostat" ]]; then - echo "${COM_CHEM_ANALYSIS/${ROTDIR}\//}/${head}aerostat" + if [[ ${DO_AERO} = "YES" ]]; then + if [[ -s "${COM_CHEM_ANALYSIS}/${head}aerostat" ]]; then + echo "${COM_CHEM_ANALYSIS/${ROTDIR}\//}/${head}aerostat" + fi fi if [[ -s "${COM_ATMOS_ANALYSIS}/${head}radstat" ]]; then echo 
"${COM_ATMOS_ANALYSIS/${ROTDIR}\//}/${head}radstat" diff --git a/ush/load_ufswm_modules.sh b/ush/load_ufswm_modules.sh index da3ab61818..6477a8ff39 100755 --- a/ush/load_ufswm_modules.sh +++ b/ush/load_ufswm_modules.sh @@ -12,8 +12,8 @@ ulimit_s=$( ulimit -S -s ) source "${HOMEgfs}/ush/detect_machine.sh" source "${HOMEgfs}/ush/module-setup.sh" if [[ "${MACHINE_ID}" != "noaacloud" ]]; then - module use "${HOMEgfs}/sorc/ufs_model.fd/tests" - module load modules.ufs_model.lua + module use "${HOMEgfs}/sorc/ufs_model.fd/modulefiles" + module load "ufs_${MACHINE_ID}.intel" module load prod_util if [[ "${MACHINE_ID}" = "wcoss2" ]]; then module load cray-pals diff --git a/versions/build.spack.ver b/versions/build.spack.ver index 28c3a10185..fb5b244bf5 100644 --- a/versions/build.spack.ver +++ b/versions/build.spack.ver @@ -1,18 +1,13 @@ export spack_stack_ver=1.5.1 export spack_env=gsi-addon -export python_ver=3.10.8 export cmake_ver=3.23.1 -export gempak_ver=7.4.2 export jasper_ver=2.0.32 export libpng_ver=1.6.37 export zlib_ver=1.2.13 export esmf_ver=8.5.0 export fms_ver=2023.02.01 -export cdo_ver=2.0.5 -export nco_ver=5.0.6 -export ncl_ver=6.6.2 export hdf5_ver=1.14.0 export netcdf_c_ver=4.9.2 @@ -31,6 +26,3 @@ export g2tmpl_ver=1.10.2 export crtm_ver=2.4.0 export wgrib2_ver=2.0.8 export grib_util_ver=1.3.0 -export py_netcdf4_ver=1.5.8 -export py_pyyaml_ver=5.4.1 -export py_jinja2_ver=3.1.2 diff --git a/versions/build.wcoss2.ver b/versions/build.wcoss2.ver index bb7ee6ac99..046ff5c64e 100644 --- a/versions/build.wcoss2.ver +++ b/versions/build.wcoss2.ver @@ -5,8 +5,6 @@ export cray_mpich_ver=8.1.9 export cmake_ver=3.20.2 -export python_ver=3.8.6 -export gempak_ver=7.14.1 export jasper_ver=2.0.25 export libpng_ver=1.6.37 export zlib_ver=1.2.11 @@ -31,6 +29,5 @@ export ncio_ver=1.1.2 export ncdiag_ver=1.0.0 export g2tmpl_ver=1.10.2 export crtm_ver=2.4.0 -export wgrib2_ver=2.0.8 export upp_ver=10.0.8 diff --git a/versions/fix.ver b/versions/fix.ver index 
a01e9d4151..13d9b56dd2 100644 --- a/versions/fix.ver +++ b/versions/fix.ver @@ -4,7 +4,7 @@ export aer_ver=20220805 export am_ver=20220805 export chem_ver=20220805 -export cice_ver=20220805 +export cice_ver=20231219 export cpl_ver=20230526 export datm_ver=20220805 export gdas_crtm_ver=20220805 @@ -13,10 +13,10 @@ export gdas_gsibec_ver=20221031 export glwu_ver=20220805 export gsi_ver=20230911 export lut_ver=20220805 -export mom6_ver=20220805 +export mom6_ver=20231219 export orog_ver=20231027 export reg2grb2_ver=20220805 export sfc_climo_ver=20220805 export ugwd_ver=20220805 export verif_ver=20220805 -export wave_ver=20230426 +export wave_ver=20240105 diff --git a/versions/run.hercules.ver b/versions/run.hercules.ver index 4bedeb1e96..43f1b2181d 100644 --- a/versions/run.hercules.ver +++ b/versions/run.hercules.ver @@ -1,7 +1,9 @@ export stack_intel_ver=2021.9.0 export stack_impi_ver=2021.9.0 +export intel_mkl_ver=2023.1.0 export ncl_ver=6.6.2 +export perl_ver=5.36.0 source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/versions/run.orion.ver b/versions/run.orion.ver index ee2f65523b..7671bc028d 100644 --- a/versions/run.orion.ver +++ b/versions/run.orion.ver @@ -4,4 +4,8 @@ export stack_impi_ver=2021.5.1 export ncl_ver=6.6.2 export gempak_ver=7.5.1 +#For metplus jobs, not currently working with spack-stack +#export met_ver=9.1.3 +#export metplus_ver=3.1.1 + source "${HOMEgfs:-}/versions/run.spack.ver" diff --git a/versions/run.spack.ver b/versions/run.spack.ver index 7045f2ed01..c1c13f58df 100644 --- a/versions/run.spack.ver +++ b/versions/run.spack.ver @@ -16,6 +16,8 @@ export gsi_ncdiag_ver=1.1.2 export g2tmpl_ver=1.10.2 export crtm_ver=2.4.0 export wgrib2_ver=2.0.8 +export grib_util_ver=1.3.0 +export prod_util_ver=1.2.2 export py_netcdf4_ver=1.5.8 export py_pyyaml_ver=5.4.1 export py_jinja2_ver=3.1.2 diff --git a/workflow/applications/applications.py b/workflow/applications/applications.py index 766d4aa508..d45b6a9abc 100644 --- 
a/workflow/applications/applications.py +++ b/workflow/applications/applications.py @@ -62,6 +62,7 @@ def __init__(self, conf: Configuration) -> None: self.do_genesis_fsu = _base.get('DO_GENESIS_FSU', False) self.do_metp = _base.get('DO_METP', False) self.do_upp = not _base.get('WRITE_DOPOST', True) + self.do_goes = _base.get('DO_GOES', False) self.do_mos = _base.get('DO_MOS', False) self.do_hpssarch = _base.get('HPSSARCH', False) diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 29c6b18f43..1ff6cc3723 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -42,7 +42,9 @@ def _get_app_configs(self): configs += ['anal', 'analdiag'] if self.do_jediocnvar: - configs += ['ocnanalprep', 'ocnanalbmat', 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy'] + configs += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat', + 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost', + 'ocnanalvrfy'] if self.do_ocean: configs += ['ocnpost'] @@ -133,8 +135,10 @@ def get_task_names(self): gdas_gfs_common_tasks_before_fcst += ['anal'] if self.do_jediocnvar: - gdas_gfs_common_tasks_before_fcst += ['ocnanalprep', 'ocnanalbmat', 'ocnanalrun', - 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy'] + gdas_gfs_common_tasks_before_fcst += ['prepoceanobs', 'ocnanalprep', + 'ocnanalbmat', 'ocnanalrun', + 'ocnanalchkpt', 'ocnanalpost', + 'ocnanalvrfy'] gdas_gfs_common_tasks_before_fcst += ['sfcanl', 'analcalc'] @@ -207,6 +211,9 @@ def get_task_names(self): gfs_tasks += ['atmupp'] gfs_tasks += ['atmprod'] + if self.do_goes: + gfs_tasks += ['goesupp'] + if self.do_vminmon: gfs_tasks += ['vminmon'] diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py index 564fd382b9..1145863210 100644 --- a/workflow/applications/gfs_forecast_only.py +++ b/workflow/applications/gfs_forecast_only.py @@ -19,7 +19,7 @@ def _get_app_configs(self): if self.do_atm: - if self.do_upp: + if self.do_upp or 
self.do_goes: configs += ['upp'] configs += ['atmos_products'] @@ -102,6 +102,9 @@ def get_task_names(self): tasks += ['atmprod'] + if self.do_goes: + tasks += ['goesupp'] + if self.do_tracker: tasks += ['tracker'] diff --git a/workflow/c3_mynn_ugwpv1.sh b/workflow/c3_mynn_ugwpv1.sh new file mode 100755 index 0000000000..7e531fd889 --- /dev/null +++ b/workflow/c3_mynn_ugwpv1.sh @@ -0,0 +1,18 @@ +#!/bin/sh +USER=Judy.K.Henderson +GITDIR=/scratch1/BMC/gsd-fv3-dev/jhender/test/gsl_ufs_dev/ ## where your git checkout is located +COMROT=${GITDIR}/FV3GFSrun ## default COMROT directory +EXPDIR=${GITDIR}/FV3GFSwfm ## default EXPDIR directory +#ICSDIR=/scratch1/BMC/gsd-fv3/rtruns/FV3ICS_L127 + +PSLOT=c3_mynn +IDATE=2023112800 +EDATE=2023112800 +RESDET=768 ## 96 192 384 768 + +### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z + +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" + diff --git a/workflow/c3_ugwpv1.sh b/workflow/c3_ugwpv1.sh new file mode 100755 index 0000000000..c3bc5cc36a --- /dev/null +++ b/workflow/c3_ugwpv1.sh @@ -0,0 +1,18 @@ +#!/bin/sh +USER=Judy.K.Henderson +GITDIR=/scratch1/BMC/gsd-fv3-dev/jhender/test/gsl_ufs_dev/ ## where your git checkout is located +COMROT=${GITDIR}/FV3GFSrun ## default COMROT directory +EXPDIR=${GITDIR}/FV3GFSwfm ## default EXPDIR directory +#ICSDIR=/scratch1/BMC/gsd-fv3/rtruns/FV3ICS_L127 + +PSLOT=c3 +IDATE=2023112800 +EDATE=2023112800 +RESDET=768 ## 96 192 384 768 + +### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z + +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" + diff --git a/workflow/gsl_pygraf.xml b/workflow/gsl_pygraf.xml new file mode 100644 index 0000000000..d4ccb20783 --- /dev/null +++ b/workflow/gsl_pygraf.xml @@ -0,0 +1,138 @@ + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]> + + + + &EXPDIR;/logs/@Y@m@d@H.log + + + &SDATE; &EDATE; &INTERVAL; + + + + 0 6 12 18 24 30 36 42 48 + 000 006 012 018 024 030 036 042 048 + + + &JOBS_DIR;/remapgrib.ksh + &ACCOUNT; + 1 + 00:35:00 + remapgrib_#T#_&PSLOT; + &ROTDIR;/logs/@Y@m@d@H/remapgrib_#T#.log + ROTDIR&ROTDIR; + CDUMP&CDUMP; + COMPONENT&COMPONENT; + yyyymmdd@Y@m@d + hh@H + fcst#T# + GRID_NAMES201D130D242 + + &ROTDIR;/&CDUMP;.@Y@m@d/@H/products/&COMPONENT;/grib2/0p25/&CDUMP;.t@Hz.pgrb2.&RES;.f#T# + + + + + + + + + full 242 130 201 + full,Africa,Beijing,Cambodia,EPacific,Europe,Taiwan,WAtlantic,WPacific AK,AKZoom,AKZoom2 CONUS,NC,NE,NW,SC,SE,SW NHemi + global.yml globalAK.yml globalCONUS.yml globalNHemi.yml + + + + + + source &PYGRAFDIR;/pre.sh; + cd &PYGRAFDIR;; + python &PYGRAFDIR;/create_graphics.py \ + maps \ + -d &ROTDIR;/&CDUMP;.@Y@m@d/@H/products/&COMPONENT;/grib2/0p25/post/#GRID_ID#\ + -f 0 &FCST_LENGTH; 6 \ + --file_type prs \ + --file_tmpl "&CDUMP;.t@Hz.pgrb2.0p25.f{FCST_TIME:03d}"\ + --images &PYGRAFDIR;/image_lists/#IMGFILE# hourly\ + -m "&PTITLE;" \ + -n ${SLURM_CPUS_ON_NODE:-12} \ + -o &ROTDIR;/&CDUMP;.@Y@m@d/@H/products/&COMPONENT;/pyprd \ + -s @Y@m@d@H \ + --tiles "#TILESET#" \ + -z &ROTDIR;/&CDUMP;.@Y@m@d/@H/products/&COMPONENT;/img + + + &ACCOUNT; + &QUEUE; + &RESOURCES_PYTHON; + &WALLTIME_PYTHON; + --exclusive + FV3GFS_python_maps_#GRID_ID#_@H_ugwpv1_mynn + &ROTDIR;/logs/@Y@m@d@H/python_@Y@m@d@H00_maps_#GRID_ID#_0-6-&FCST_LENGTH;.log + + + + + + + + + + diff --git a/workflow/gsl_template_hera.xml b/workflow/gsl_template_hera.xml index 36080f43a9..8d7caf4b33 100644 --- a/workflow/gsl_template_hera.xml +++ b/workflow/gsl_template_hera.xml @@ -9,12 +9,13 @@ This workflow was automatically generated at 2023-06-13 23:31:49.582810 --> - + + @@ -152,7 +153,6 @@ - - + diff --git a/workflow/hosts/awspw.yaml b/workflow/hosts/awspw.yaml index becb38e236..c683010e0e 100644 --- a/workflow/hosts/awspw.yaml +++ 
b/workflow/hosts/awspw.yaml @@ -1,8 +1,7 @@ BASE_GIT: '/scratch1/NCEPDEV/global/glopara/git' #TODO: This does not yet exist. DMPDIR: '/scratch1/NCEPDEV/global/glopara/dump' # TODO: This does not yet exist. PACKAGEROOT: '/scratch1/NCEPDEV/global/glopara/nwpara' #TODO: This does not yet exist. -COMROOT: '/scratch1/NCEPDEV/global/glopara/com' #TODO: This does not yet exist. -COMINsyn: '${COMROOT}/gfs/prod/syndat' #TODO: This does not yet exist. +COMINsyn: '/scratch1/NCEPDEV/global/glopara/com/gfs/prod/syndat' #TODO: This does not yet exist. HOMEDIR: '/contrib/${USER}' STMP: '/lustre/${USER}/stmp2/' PTMP: '/lustre/${USER}/stmp4/' diff --git a/workflow/hosts/container.yaml b/workflow/hosts/container.yaml index 879be0bf31..3fd3856679 100644 --- a/workflow/hosts/container.yaml +++ b/workflow/hosts/container.yaml @@ -1,7 +1,6 @@ BASE_GIT: '' DMPDIR: '/home/${USER}' PACKAGEROOT: '' -COMROOT: '' COMINsyn: '' HOMEDIR: '/home/${USER}' STMP: '/home/${USER}' diff --git a/workflow/hosts/hera.yaml b/workflow/hosts/hera.yaml index e0c6c43590..45a868d636 120000 --- a/workflow/hosts/hera.yaml +++ b/workflow/hosts/hera.yaml @@ -1 +1 @@ -hera_gsl_rt.yaml \ No newline at end of file +hera_gsl.yaml \ No newline at end of file diff --git a/workflow/hosts/hera_gsl.yaml b/workflow/hosts/hera_gsl.yaml index ea917d4790..c12cac1559 100644 --- a/workflow/hosts/hera_gsl.yaml +++ b/workflow/hosts/hera_gsl.yaml @@ -5,8 +5,8 @@ PACKAGEROOT: '/scratch1/NCEPDEV/global/glopara/nwpara' COMROOT: '/scratch1/NCEPDEV/global/glopara/com' COMINsyn: '${COMROOT}/gfs/prod/syndat' HOMEDIR: '/scratch1/BMC/gsd-fv3-dev/NCEPDEV/global/${USER}' -STMP: '${HOMEgfs}/${PSLOT}/FV3GFSrun/' -PTMP: '${HOMEgfs}/${PSLOT}/FV3GFSrun/' +STMP: '${HOMEgfs}/FV3GFSrun/' +PTMP: '${HOMEgfs}/FV3GFSrun/' NOSCRUB: $HOMEDIR ACCOUNT: gsd-fv3 SCHEDULER: slurm diff --git a/workflow/hosts/hercules.yaml b/workflow/hosts/hercules.yaml index e977091ba6..58a9589f2f 100644 --- a/workflow/hosts/hercules.yaml +++ b/workflow/hosts/hercules.yaml @@ 
-2,8 +2,7 @@ BASE_GIT: '/work/noaa/global/glopara/git' DMPDIR: '/work/noaa/rstprod/dump' BASE_CPLIC: '/work/noaa/global/glopara/data/ICSDIR/prototype_ICs' PACKAGEROOT: '/work/noaa/global/glopara/nwpara' -COMROOT: '/work/noaa/global/glopara/com' -COMINsyn: '${COMROOT}/gfs/prod/syndat' +COMINsyn: '/work/noaa/global/glopara/com/gfs/prod/syndat' HOMEDIR: '/work/noaa/global/${USER}' STMP: '/work/noaa/stmp/${USER}' PTMP: '/work/noaa/stmp/${USER}' diff --git a/workflow/hosts/jet_emc.yaml b/workflow/hosts/jet_emc.yaml index 313ce38dc2..00c89b60a1 100644 --- a/workflow/hosts/jet_emc.yaml +++ b/workflow/hosts/jet_emc.yaml @@ -2,8 +2,7 @@ BASE_GIT: '/lfs4/HFIP/hfv3gfs/glopara/git' DMPDIR: '/lfs4/HFIP/hfv3gfs/glopara/dump' BASE_CPLIC: '/mnt/lfs4/HFIP/hfv3gfs/glopara/data/ICSDIR/prototype_ICs' PACKAGEROOT: '/lfs4/HFIP/hfv3gfs/glopara/nwpara' -COMROOT: '/lfs4/HFIP/hfv3gfs/glopara/com' -COMINsyn: '${COMROOT}/gfs/prod/syndat' +COMINsyn: '/lfs4/HFIP/hfv3gfs/glopara/com/gfs/prod/syndat' HOMEDIR: '/lfs4/HFIP/hfv3gfs/${USER}' STMP: '/lfs4/HFIP/hfv3gfs/${USER}/stmp' PTMP: '/lfs4/HFIP/hfv3gfs/${USER}/ptmp' diff --git a/workflow/hosts/jet_gsl.yaml b/workflow/hosts/jet_gsl.yaml index 4f668204af..9da6f2b2aa 100644 --- a/workflow/hosts/jet_gsl.yaml +++ b/workflow/hosts/jet_gsl.yaml @@ -2,7 +2,6 @@ BASE_GIT: '/lfs4/HFIP/hfv3gfs/glopara/git' DMPDIR: '/lfs4/HFIP/hfv3gfs/glopara/dump' BASE_CPLIC: '/mnt/lfs4/HFIP/hfv3gfs/glopara/data/ICSDIR/prototype_ICs' PACKAGEROOT: '/lfs4/HFIP/hfv3gfs/glopara/nwpara' -COMROOT: '/lfs4/HFIP/hfv3gfs/glopara/com' COMINsyn: '/lfs4/HFIP/hwrf-data/hwrf-input/SYNDAT-PLUS' HOMEDIR: '/lfs1/BMC/gsd-fv3-test/NCEPDEV/global/$USER' STMP: '/home/Judy.K.Henderson/scratch1-test/gw_19may23/FV3GFSrun' diff --git a/workflow/hosts/orion.yaml b/workflow/hosts/orion.yaml index 459aee7cf6..4c08a878dc 100644 --- a/workflow/hosts/orion.yaml +++ b/workflow/hosts/orion.yaml @@ -2,8 +2,7 @@ BASE_GIT: '/work/noaa/global/glopara/git' DMPDIR: '/work/noaa/rstprod/dump' BASE_CPLIC: 
'/work/noaa/global/glopara/data/ICSDIR/prototype_ICs' PACKAGEROOT: '/work/noaa/global/glopara/nwpara' -COMROOT: '/work/noaa/global/glopara/com' -COMINsyn: '${COMROOT}/gfs/prod/syndat' +COMINsyn: '/work/noaa/global/glopara/com/gfs/prod/syndat' HOMEDIR: '/work/noaa/global/${USER}' STMP: '/work/noaa/stmp/${USER}' PTMP: '/work/noaa/stmp/${USER}' diff --git a/workflow/hosts/s4.yaml b/workflow/hosts/s4.yaml index 01b28c1e6b..52a9f7a365 100644 --- a/workflow/hosts/s4.yaml +++ b/workflow/hosts/s4.yaml @@ -2,8 +2,7 @@ BASE_GIT: '/data/prod/glopara/git' DMPDIR: '/data/prod/glopara/dump' BASE_CPLIC: '/data/prod/glopara/coupled_ICs' PACKAGEROOT: '/data/prod/glopara/nwpara' -COMROOT: '/data/prod/glopara/com' -COMINsyn: '${COMROOT}/gfs/prod/syndat' +COMINsyn: '/data/prod/glopara/com/gfs/prod/syndat' HOMEDIR: '/data/users/${USER}' STMP: '/scratch/users/${USER}' PTMP: '/scratch/users/${USER}' diff --git a/workflow/hosts/wcoss2.yaml b/workflow/hosts/wcoss2.yaml index 04a5949b2e..cfb141061c 100644 --- a/workflow/hosts/wcoss2.yaml +++ b/workflow/hosts/wcoss2.yaml @@ -2,8 +2,7 @@ BASE_GIT: '/lfs/h2/emc/global/save/emc.global/git' DMPDIR: '/lfs/h2/emc/dump/noscrub/dump' BASE_CPLIC: '/lfs/h2/emc/global/noscrub/emc.global/data/ICSDIR/prototype_ICs' PACKAGEROOT: '${PACKAGEROOT:-"/lfs/h1/ops/prod/packages"}' -COMROOT: '${COMROOT:-"/lfs/h1/ops/prod/com"}' -COMINsyn: '${COMROOT}/gfs/v16.3/syndat' +COMINsyn: '/lfs/h1/ops/prod/com/gfs/v16.3/syndat' HOMEDIR: '/lfs/h2/emc/global/noscrub/${USER}' STMP: '/lfs/h2/emc/stmp/${USER}' PTMP: '/lfs/h2/emc/ptmp/${USER}' diff --git a/workflow/mynn_ugwpv1.sh b/workflow/mynn_ugwpv1.sh new file mode 100755 index 0000000000..af054257c6 --- /dev/null +++ b/workflow/mynn_ugwpv1.sh @@ -0,0 +1,18 @@ +#!/bin/sh +USER=Judy.K.Henderson +GITDIR=/scratch1/BMC/gsd-fv3-dev/jhender/test/gsl_ufs_dev/ ## where your git checkout is located +COMROT=${GITDIR}/FV3GFSrun ## default COMROT directory +EXPDIR=${GITDIR}/FV3GFSwfm ## default EXPDIR directory 
+#ICSDIR=/scratch1/BMC/gsd-fv3/rtruns/FV3ICS_L127 + +PSLOT=mynn +IDATE=2023112800 +EDATE=2023112800 +RESDET=768 ## 96 192 384 768 + +### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z + +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" + diff --git a/workflow/p8_ugwpv1.sh b/workflow/p8_ugwpv1.sh index 6ec129797d..148742011c 100755 --- a/workflow/p8_ugwpv1.sh +++ b/workflow/p8_ugwpv1.sh @@ -1,8 +1,9 @@ +#!/bin/sh USER=Judy.K.Henderson GITDIR=/scratch1/BMC/gsd-fv3-dev/jhender/test/gsl_ufs_dev/ ## where your git checkout is located -COMROT=$GITDIR/FV3GFSrun ## default COMROT directory -EXPDIR=$GITDIR/FV3GFSwfm ## default EXPDIR directory -ICSDIR=/scratch1/BMC/gsd-fv3/rtruns/FV3ICS_L127 +COMROT=${GITDIR}/FV3GFSrun ## default COMROT directory +EXPDIR=${GITDIR}/FV3GFSwfm ## default EXPDIR directory +#ICSDIR=/scratch1/BMC/gsd-fv3/rtruns/FV3ICS_L127 PSLOT=p8 IDATE=2023112800 @@ -11,7 +12,7 @@ RESDET=768 ## 96 192 384 768 ### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z -./setup_expt.py gfs forecast-only --pslot $PSLOT --gfs_cyc 1 \ - --idate $IDATE --edate $EDATE --resdet $RESDET \ - --comrot $COMROT --expdir $EXPDIR +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py index 680c7d8686..c46d9ad452 100644 --- a/workflow/rocoto/gefs_tasks.py +++ b/workflow/rocoto/gefs_tasks.py @@ -1,5 +1,5 @@ from applications.applications import AppConfig -from rocoto.tasks import Tasks, create_wf_task +from rocoto.tasks import Tasks import rocoto.rocoto as rocoto @@ -57,41 +57,74 @@ def stage_ic(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('stage_ic') - task = create_wf_task('stage_ic', resources, 
cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'stage_ic' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/stage_ic.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + task = rocoto.create_task(task_dict) return task def waveinit(self): resources = self.get_resource('waveinit') - task = create_wf_task('waveinit', resources, cdump=self.cdump, envar=self.envars, dependency=None) + task_name = f'waveinit' + task_dict = {'task_name': task_name, + 'resources': resources, + 'envars': self.envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/waveinit.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + task = rocoto.create_task(task_dict) return task def fcst(self): + # TODO: Add real dependencies dependencies = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}stage_ic'} + dep_dict = {'type': 'task', 'name': f'stage_ic'} dependencies.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: - dep_dict = {'type': 'task', 'name': f'{self.cdump}waveinit'} + dep_dict = {'type': 'task', 'name': f'waveinit'} dependencies.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) resources = self.get_resource('fcst') - task = create_wf_task('fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': 
'&MAXTRIES;' + } + task = rocoto.create_task(task_dict) return task def efcs(self): dependencies = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}stage_ic'} + dep_dict = {'type': 'task', 'name': f'stage_ic'} dependencies.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_wave: - dep_dict = {'type': 'task', 'name': f'{self.cdump}waveinit'} + dep_dict = {'type': 'task', 'name': f'waveinit'} dependencies.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) @@ -100,9 +133,27 @@ def efcs(self): efcsenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#')) groups = self._get_hybgroups(self._base['NMEM_ENS'], self._configs['efcs']['NMEM_EFCSGRP']) + var_dict = {'grp': groups} resources = self.get_resource('efcs') - task = create_wf_task('efcs', resources, cdump=self.cdump, envar=efcsenvars, dependency=dependencies, - metatask='efmn', varname='grp', varval=groups, cycledef='gefs') + + task_name = f'efcs#grp#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': efcsenvars, + 'cycledef': 'gefs', + 'command': f'{self.HOMEgfs}/jobs/rocoto/efcs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': 'efmn', + 'var_dict': var_dict, + 'task_dict': task_dict + } + + task = rocoto.create_task(metatask_dict) return task diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 18208983b8..0f5e184192 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -1,5 +1,5 @@ from applications.applications import AppConfig -from rocoto.tasks import Tasks, create_wf_task +from rocoto.tasks import Tasks from wxflow import timedelta_to_HMS import rocoto.rocoto as rocoto import numpy as np @@ -71,7 +71,19 @@ def stage_ic(self): dependencies = rocoto.create_dependency(dep_condition='and', 
dep=deps) resources = self.get_resource('stage_ic') - task = create_wf_task('stage_ic', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}stage_ic' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump, + 'command': f'{self.HOMEgfs}/jobs/rocoto/stage_ic.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -102,8 +114,19 @@ def prep(self): cycledef = 'gdas' resources = self.get_resource('prep') - task = create_wf_task('prep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}prep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/prep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -111,7 +134,7 @@ def waveinit(self): resources = self.get_resource('waveinit') dependencies = None - cycledef = None + cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump if self.app_config.mode in ['cycled']: deps = [] dep_dict = {'type': 'task', 'name': f'{self.cdump}prep'} @@ -120,8 +143,20 @@ def waveinit(self): dep_dict = {'type': 'cycleexist', 'condition': 'not', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) - cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump - task = create_wf_task('waveinit', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, cycledef=cycledef) + + 
task_name = f'{self.cdump}waveinit' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/waveinit.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -133,7 +168,19 @@ def waveprep(self): dependencies = rocoto.create_dependency(dep=deps) cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('waveprep') - task = create_wf_task('waveprep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, cycledef=cycledef) + task_name = f'{self.cdump}waveprep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/waveprep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -172,8 +219,19 @@ def aerosol_init(self): cycledef = 'gfs_seq' resources = self.get_resource('aerosol_init') - task = create_wf_task('aerosol_init', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}aerosol_init' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/aerosol_init.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -189,7 +247,19 @@ def anal(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('anal') - task = 
create_wf_task('anal', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}anal' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/anal.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -209,7 +279,19 @@ def sfcanl(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('sfcanl') - task = create_wf_task('sfcanl', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}sfcanl' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/sfcanl.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -229,7 +311,19 @@ def analcalc(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('analcalc') - task = create_wf_task('analcalc', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}analcalc' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/analcalc.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -241,7 +335,19 @@ def analdiag(self): dependencies = 
rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('analdiag') - task = create_wf_task('analdiag', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}analdiag' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/analdiag.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -253,7 +359,19 @@ def prepatmiodaobs(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('prepatmiodaobs') - task = create_wf_task('prepatmiodaobs', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}prepatmiodaobs' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/prepatmiodaobs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -277,8 +395,19 @@ def atmanlinit(self): cycledef = 'gdas' resources = self.get_resource('atmanlinit') - task = create_wf_task('atmanlinit', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}atmanlinit' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlinit.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = 
rocoto.create_task(task_dict) return task @@ -290,7 +419,19 @@ def atmanlrun(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('atmanlrun') - task = create_wf_task('atmanlrun', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}atmanlrun' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlrun.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -302,7 +443,19 @@ def atmanlfinal(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('atmanlfinal') - task = create_wf_task('atmanlfinal', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}atmanlfinal' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmanlfinal.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -314,7 +467,20 @@ def aeroanlinit(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('aeroanlinit') - task = create_wf_task('aeroanlinit', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}aeroanlinit' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlinit.sh', 
+ 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) + return task def aeroanlrun(self): @@ -325,7 +491,19 @@ def aeroanlrun(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('aeroanlrun') - task = create_wf_task('aeroanlrun', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}aeroanlrun' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlrun.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -337,7 +515,19 @@ def aeroanlfinal(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('aeroanlfinal') - task = create_wf_task('aeroanlfinal', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}aeroanlfinal' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/aeroanlfinal.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -349,7 +539,19 @@ def preplandobs(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('preplandobs') - task = create_wf_task('preplandobs', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}preplandobs' + task_dict = {'task_name': task_name, + 'resources': resources, + 
'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/preplandobs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -361,10 +563,22 @@ def landanl(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('landanl') - task = create_wf_task('landanl', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}landanl' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/landanl.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task - def ocnanalprep(self): + def prepoceanobs(self): ocean_hist_path = self._template_to_rocoto_cycstring(self._base["COM_OCEAN_HISTORY_TMPL"], {'RUN': 'gdas'}) @@ -374,12 +588,44 @@ def ocnanalprep(self): deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) + resources = self.get_resource('prepoceanobs') + task_name = f'{self.cdump}prepoceanobs' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/prepoceanobs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) + + return task + + def ocnanalprep(self): + + deps = [] + dep_dict = {'type': 'task', 'name': f'{self.cdump}prepoceanobs'} + deps.append(rocoto.add_dependency(dep_dict)) + 
dependencies = rocoto.create_dependency(dep=deps) + resources = self.get_resource('ocnanalprep') - task = create_wf_task('ocnanalprep', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalprep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalprep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -391,11 +637,19 @@ def ocnanalbmat(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('ocnanalbmat') - task = create_wf_task('ocnanalbmat', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalbmat' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalbmat.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -407,11 +661,19 @@ def ocnanalrun(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('ocnanalrun') - task = create_wf_task('ocnanalrun', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalrun' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalrun.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': 
f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -427,11 +689,19 @@ def ocnanalchkpt(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('ocnanalchkpt') - task = create_wf_task('ocnanalchkpt', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalchkpt' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalchkpt.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -443,11 +713,19 @@ def ocnanalpost(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('ocnanalpost') - task = create_wf_task('ocnanalpost', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalpost' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalpost.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -459,11 +737,19 @@ def ocnanalvrfy(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('ocnanalvrfy') - task = create_wf_task('ocnanalvrfy', - resources, - cdump=self.cdump, - envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}ocnanalvrfy' + task_dict = {'task_name': task_name, + 'resources': resources, + 
'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalvrfy.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -510,7 +796,19 @@ def _fcst_forecast_only(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) resources = self.get_resource('fcst') - task = create_wf_task('fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -547,8 +845,19 @@ def _fcst_cycled(self): cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('fcst') - task = create_wf_task('fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -572,8 +881,19 @@ def atmanlupp(self): deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') resources = self.get_resource('upp') - task = create_wf_task('atmanlupp', resources, 
cdump=self.cdump, envar=postenvars, dependency=dependencies, - cycledef=self.cdump, command='&JOBS_DIR;/upp.sh') + task_name = f'{self.cdump}atmanlupp' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': postenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/upp.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -590,8 +910,19 @@ def atmanlprod(self): deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('atmos_products') - task = create_wf_task('atmanlprod', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, - cycledef=self.cdump, command='&JOBS_DIR;/atmos_products.sh') + task_name = f'{self.cdump}atmanlprod' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': postenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmos_products.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -629,14 +960,24 @@ def _get_ufs_postproc_grps(cdump, config): return grp, dep, lst def atmupp(self): + return self._upptask(upp_run='forecast', task_id='atmupp') + + def goesupp(self): + return self._upptask(upp_run='goes', task_id='goesupp') + + def _upptask(self, upp_run="forecast", task_id="atmupp"): + + VALID_UPP_RUN = ["forecast", "goes", "wafs"] + if upp_run not in VALID_UPP_RUN: + raise KeyError(f"{upp_run} is invalid; UPP_RUN options are: {('|').join(VALID_UPP_RUN)}") varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['upp']) - 
vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} postenvars = self.envars.copy() postenvar_dict = {'FHRLST': '#lst#', - 'UPP_RUN': 'forecast'} + 'UPP_RUN': upp_run} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) @@ -654,9 +995,25 @@ def atmupp(self): dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('upp') - task = create_wf_task('atmupp', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, - metatask='atmupp', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef, - command='&JOBS_DIR;/upp.sh') + + task_name = f'{self.cdump}{task_id}#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': postenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/upp.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}{task_id}', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -664,7 +1021,7 @@ def atmprod(self): varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['atmos_products']) - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} postenvars = self.envars.copy() postenvar_dict = {'FHRLST': '#lst#'} @@ -679,9 +1036,25 @@ def atmprod(self): dependencies = rocoto.create_dependency(dep=deps) cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('atmos_products') - task = create_wf_task('atmprod', resources, cdump=self.cdump, 
envar=postenvars, dependency=dependencies, - metatask='atmprod', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef, - command='&JOBS_DIR;/atmos_products.sh') + + task_name = f'{self.cdump}atmprod#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': postenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmos_products.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}atmprod', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -689,11 +1062,11 @@ def ocnpost(self): varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = self._get_ufs_postproc_grps(self.cdump, self._configs['ocnpost']) - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} postenvars = self.envars.copy() postenvar_dict = {'FHRLST': '#lst#', - 'ROTDIR': self._base.get('ROTDIR')} + 'ROTDIR': self.rotdir} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) @@ -707,8 +1080,25 @@ def ocnpost(self): dependencies = rocoto.create_dependency(dep_condition='or', dep=deps) cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('ocnpost') - task = create_wf_task('ocnpost', resources, cdump=self.cdump, envar=postenvars, dependency=dependencies, - metatask='ocnpost', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef) + + task_name = f'{self.cdump}ocnpost#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': postenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/ocnpost.sh', + 'job_name': 
f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}ocnpost', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -722,7 +1112,19 @@ def wavepostsbs(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('wavepostsbs') - task = create_wf_task('wavepostsbs', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}wavepostsbs' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostsbs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -733,7 +1135,19 @@ def wavepostbndpnt(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('wavepostbndpnt') - task = create_wf_task('wavepostbndpnt', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}wavepostbndpnt' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpnt.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -746,8 +1160,19 @@ def wavepostbndpntbll(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('wavepostbndpntbll') - task = create_wf_task('wavepostbndpntbll', resources, cdump=self.cdump, envar=self.envars, - 
dependency=dependencies) + task_name = f'{self.cdump}wavepostbndpntbll' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostbndpntbll.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -761,7 +1186,19 @@ def wavepostpnt(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('wavepostpnt') - task = create_wf_task('wavepostpnt', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}wavepostpnt' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavepostpnt.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -772,7 +1209,19 @@ def wavegempak(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('wavegempak') - task = create_wf_task('wavegempak', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}wavegempak' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/wavegempak.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -785,7 +1234,19 @@ def waveawipsbulls(self): dependencies = 
rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('waveawipsbulls') - task = create_wf_task('waveawipsbulls', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}waveawipsbulls' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/waveawipsbulls.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -796,8 +1257,19 @@ def waveawipsgridded(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('waveawipsgridded') - task = create_wf_task('waveawipsgridded', resources, cdump=self.cdump, envar=self.envars, - dependency=dependencies) + task_name = f'{self.cdump}waveawipsgridded' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/waveawipsgridded.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -808,7 +1280,19 @@ def postsnd(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('postsnd') - task = create_wf_task('postsnd', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}postsnd' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/postsnd.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': 
f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -828,13 +1312,24 @@ def fbwind(self): dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') resources = self.get_resource('awips') - # TODO: It would be better to use task dependencies on the # individual post jobs rather than data dependencies to avoid # prematurely starting with partial files. Unfortunately, the # ability to "group" post would make this more convoluted than # it should be and not worth the complexity. - task = create_wf_task('fbwind', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}fbwind' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/fbwind.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -885,17 +1380,34 @@ def awips_20km_1p0deg(self): awipsenvars = self.envars.copy() awipsenvar_dict = {'FHRGRP': '#grp#', 'FHRLST': '#lst#', - 'ROTDIR': self._base.get('ROTDIR')} + 'ROTDIR': self.rotdir} for key, value in awipsenvar_dict.items(): awipsenvars.append(rocoto.create_envar(name=key, value=str(value))) varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = self._get_awipsgroups(self.cdump, self._configs['awips']) - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} resources = self.get_resource('awips') - task = create_wf_task('awips_20km_1p0deg', resources, cdump=self.cdump, envar=awipsenvars, dependency=dependencies, - metatask='awips_20km_1p0deg', varname=varname1, varval=varval1, vardict=vardict) + + task_name = 
f'{self.cdump}awips_20km_1p0deg#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': awipsenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/awips_20km_1p0deg.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}awips_20km_1p0deg', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -909,17 +1421,34 @@ def awips_g2(self): awipsenvars = self.envars.copy() awipsenvar_dict = {'FHRGRP': '#grp#', 'FHRLST': '#lst#', - 'ROTDIR': self._base.get('ROTDIR')} + 'ROTDIR': self.rotdir} for key, value in awipsenvar_dict.items(): awipsenvars.append(rocoto.create_envar(name=key, value=str(value))) varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = self._get_awipsgroups(self.cdump, self._configs['awips']) - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} resources = self.get_resource('awips') - task = create_wf_task('awips_g2', resources, cdump=self.cdump, envar=awipsenvars, dependency=dependencies, - metatask='awips_g2', varname=varname1, varval=varval1, vardict=vardict) + + task_name = f'{self.cdump}awips_g2#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': awipsenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/awips_g2.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}awips_g2', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -931,7 +1460,19 @@ def gempak(self): 
dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('gempak') - task = create_wf_task('gempak', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}gempak' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/gempak.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -942,7 +1483,19 @@ def gempakmeta(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('gempak') - task = create_wf_task('gempakmeta', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}gempakmeta' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/gempakmeta.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -953,7 +1506,19 @@ def gempakmetancdc(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('gempak') - task = create_wf_task('gempakmetancdc', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}gempakmetancdc' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/gempakmetancdc.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + 
task = rocoto.create_task(task_dict) return task @@ -964,17 +1529,42 @@ def gempakncdcupapgif(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('gempak') - task = create_wf_task('gempakncdcupapgif', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}gempakncdcupapgif' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/gempakncdcupapgif.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task def gempakpgrb2spec(self): deps = [] dep_dict = {'type': 'task', 'name': f'{self.cdump}npoess_pgrb2_0p5deg'} + deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('gempak') - task = create_wf_task('gempakpgrb2spec', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}gempakgrb2spec' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/gempakgrb2spec.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -986,7 +1576,19 @@ def npoess_pgrb2_0p5deg(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('npoess') - task = create_wf_task('npoess_pgrb2_0p5deg', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}npoess_pgrb2_0p5deg' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': 
dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/npoess_pgrb2_0p5deg.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -997,7 +1599,19 @@ def verfozn(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('verfozn') - task = create_wf_task('verfozn', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}verfozn' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/verfozn.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1008,7 +1622,19 @@ def verfrad(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('verfrad') - task = create_wf_task('verfrad', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}verfrad' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/verfrad.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1019,7 +1645,19 @@ def vminmon(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('vminmon') - task = create_wf_task('vminmon', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}vminmon' + task_dict 
= {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/vminmon.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1030,7 +1668,19 @@ def tracker(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('tracker') - task = create_wf_task('tracker', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}tracker' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/tracker.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1041,7 +1691,19 @@ def genesis(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('genesis') - task = create_wf_task('genesis', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}genesis' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/genesis.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1052,7 +1714,19 @@ def genesis_fsu(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('genesis_fsu') - task = create_wf_task('genesis_fsu', resources, cdump=self.cdump, envar=self.envars, 
dependency=dependencies) + task_name = f'{self.cdump}genesis_fsu' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/genesis_fsu.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1063,7 +1737,19 @@ def fit2obs(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('fit2obs') - task = create_wf_task('fit2obs', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}fit2obs' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/fit2obs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1084,10 +1770,28 @@ def metp(self): varname1 = 'metpcase' varval1 = 'g2g1 g2o1 pcp1' + var_dict = {varname1: varval1} resources = self.get_resource('metp') - task = create_wf_task('metp', resources, cdump=self.cdump, envar=metpenvars, dependency=dependencies, - metatask='metp', varname=varname1, varval=varval1) + + task_name = f'{self.cdump}metp#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': metpenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/metp.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}metp', + 'task_dict': task_dict, + 'var_dict': var_dict + } + + task = 
rocoto.create_task(metatask_dict) return task @@ -1098,7 +1802,19 @@ def mos_stn_prep(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_stn_prep') - task = create_wf_task('mos_stn_prep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_stn_prep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_prep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1109,7 +1825,19 @@ def mos_grd_prep(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_grd_prep') - task = create_wf_task('mos_grd_prep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_grd_prep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_prep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1120,7 +1848,19 @@ def mos_ext_stn_prep(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_ext_stn_prep') - task = create_wf_task('mos_ext_stn_prep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_stn_prep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': 
f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_prep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1131,7 +1871,19 @@ def mos_ext_grd_prep(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_ext_grd_prep') - task = create_wf_task('mos_ext_grd_prep', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_grd_prep' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_prep.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1142,7 +1894,19 @@ def mos_stn_fcst(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_stn_fcst') - task = create_wf_task('mos_stn_fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_stn_fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1156,7 +1920,19 @@ def mos_grd_fcst(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_grd_fcst') - task = create_wf_task('mos_grd_fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_grd_fcst' 
+ task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1170,7 +1946,19 @@ def mos_ext_stn_fcst(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_ext_stn_fcst') - task = create_wf_task('mos_ext_stn_fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_stn_fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1187,7 +1975,19 @@ def mos_ext_grd_fcst(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_ext_grd_fcst') - task = create_wf_task('mos_ext_grd_fcst', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_grd_fcst' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_fcst.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1198,7 +1998,19 @@ def mos_stn_prdgen(self): dependencies = 
rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_stn_prdgen') - task = create_wf_task('mos_stn_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_stn_prdgen' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_stn_prdgen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1212,7 +2024,19 @@ def mos_grd_prdgen(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_grd_prdgen') - task = create_wf_task('mos_grd_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_grd_prdgen' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_grd_prdgen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1226,7 +2050,19 @@ def mos_ext_stn_prdgen(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_ext_stn_prdgen') - task = create_wf_task('mos_ext_stn_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_stn_prdgen' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_stn_prdgen.sh', + 'job_name': 
f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1243,7 +2079,19 @@ def mos_ext_grd_prdgen(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_ext_grd_prdgen') - task = create_wf_task('mos_ext_grd_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_ext_grd_prdgen' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_ext_grd_prdgen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1254,7 +2102,19 @@ def mos_wx_prdgen(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('mos_wx_prdgen') - task = create_wf_task('mos_wx_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_wx_prdgen' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_wx_prdgen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1268,7 +2128,19 @@ def mos_wx_ext_prdgen(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('mos_wx_ext_prdgen') - task = create_wf_task('mos_wx_ext_prdgen', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}mos_wx_ext_prdgen' + 
task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/mos_wx_ext_prdgen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1341,8 +2213,19 @@ def arch(self): cycledef = 'gdas_half,gdas' if self.cdump in ['gdas'] else self.cdump resources = self.get_resource('arch') - task = create_wf_task('arch', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}arch' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/arch.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1359,7 +2242,19 @@ def cleanup(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('cleanup') - task = create_wf_task('cleanup', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}cleanup' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/cleanup.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1373,7 +2268,19 @@ def eobs(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('eobs') - task = create_wf_task('eobs', 
resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}eobs' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/eobs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1388,9 +2295,27 @@ def eomg(self): groups = self._get_hybgroups(self._base['NMEM_ENS'], self._configs['eobs']['NMEM_EOMGGRP']) + var_dict = {'grp': groups} + resources = self.get_resource('eomg') - task = create_wf_task('eomg', resources, cdump=self.cdump, envar=eomgenvars, dependency=dependencies, - metatask='eomn', varname='grp', varval=groups) + task_name = f'{self.cdump}eomg#grp#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': eomgenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/eomg.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}eomn', + 'var_dict': var_dict, + 'task_dict': task_dict, + } + + task = rocoto.create_task(metatask_dict) return task @@ -1401,7 +2326,19 @@ def ediag(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('ediag') - task = create_wf_task('ediag', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}ediag' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ediag.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': 
f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1415,7 +2352,19 @@ def eupd(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('eupd') - task = create_wf_task('eupd', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}eupd' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/eupd.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1429,8 +2378,19 @@ def atmensanlinit(self): cycledef = "gdas" resources = self.get_resource('atmensanlinit') - task = create_wf_task('atmensanlinit', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - cycledef=cycledef) + task_name = f'{self.cdump}atmensanlinit' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmensanlinit.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1444,7 +2404,19 @@ def atmensanlrun(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('atmensanlrun') - task = create_wf_task('atmensanlrun', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}atmensanlrun' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': 
f'{self.HOMEgfs}/jobs/rocoto/atmensanlrun.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1456,7 +2428,19 @@ def atmensanlfinal(self): dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('atmensanlfinal') - task = create_wf_task('atmensanlfinal', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}atmensanlfinal' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/atmensanlfinal.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1503,11 +2487,28 @@ def _get_ecengroups(): varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = _get_ecengroups() - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} resources = self.get_resource('ecen') - task = create_wf_task('ecen', resources, cdump=self.cdump, envar=ecenenvars, dependency=dependencies, - metatask='ecmn', varname=varname1, varval=varval1, vardict=vardict) + + task_name = f'{self.cdump}ecen#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': ecenenvars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/ecen.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}ecmn', + 'var_dict': var_dict, + 'task_dict': task_dict + } + + task = rocoto.create_task(metatask_dict) return task 
def esfc(self): @@ -1525,7 +2526,19 @@ def esfc(self): dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('esfc') - task = create_wf_task('esfc', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies) + task_name = f'{self.cdump}esfc' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': self.cdump.replace('enkf', ''), + 'command': f'{self.HOMEgfs}/jobs/rocoto/esfc.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1550,8 +2563,27 @@ def efcs(self): groups = self._get_hybgroups(self._base['NMEM_ENS_GFS'], self._configs['efcs']['NMEM_EFCSGRP_GFS']) cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '') resources = self.get_resource('efcs') - task = create_wf_task('efcs', resources, cdump=self.cdump, envar=efcsenvars, dependency=dependencies, - metatask='efmn', varname='grp', varval=groups, cycledef=cycledef) + + var_dict = {'grp': groups} + + task_name = f'{self.cdump}efcs#grp#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': efcsenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/efcs.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}efmn', + 'var_dict': var_dict, + 'task_dict': task_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -1569,8 +2601,19 @@ def echgres(self): cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump resources = self.get_resource('echgres') - task = create_wf_task('echgres', resources, cdump=self.cdump, envar=self.envars, dependency=dependencies, - 
cycledef=cycledef) + task_name = f'{self.cdump}echgres' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': self.envars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/echgres.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + task = rocoto.create_task(task_dict) return task @@ -1611,13 +2654,30 @@ def _get_eposgroups(epos): varname1, varname2, varname3 = 'grp', 'dep', 'lst' varval1, varval2, varval3 = _get_eposgroups(self._configs['epos']) - vardict = {varname2: varval2, varname3: varval3} + var_dict = {varname1: varval1, varname2: varval2, varname3: varval3} cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '') resources = self.get_resource('epos') - task = create_wf_task('epos', resources, cdump=self.cdump, envar=eposenvars, dependency=dependencies, - metatask='epmn', varname=varname1, varval=varval1, vardict=vardict, cycledef=cycledef) + + task_name = f'{self.cdump}epos#{varname1}#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': eposenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/epos.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}epmn', + 'var_dict': var_dict, + 'task_dict': task_dict + } + + task = rocoto.create_task(metatask_dict) return task @@ -1636,7 +2696,26 @@ def earc(self): cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '') resources = self.get_resource('earc') - task = create_wf_task('earc', resources, cdump=self.cdump, envar=earcenvars, dependency=dependencies, - metatask='eamn', varname='grp', varval=groups, cycledef=cycledef) + + var_dict = {'grp': groups} + + task_name = 
f'{self.cdump}earc#grp#' + task_dict = {'task_name': task_name, + 'resources': resources, + 'dependency': dependencies, + 'envars': earcenvars, + 'cycledef': cycledef, + 'command': f'{self.HOMEgfs}/jobs/rocoto/earc.sh', + 'job_name': f'{self.pslot}_{task_name}_@H', + 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', + 'maxtries': '&MAXTRIES;' + } + + metatask_dict = {'task_name': f'{self.cdump}eamn', + 'var_dict': var_dict, + 'task_dict': task_dict + } + + task = rocoto.create_task(metatask_dict) return task diff --git a/workflow/rocoto/rocoto.py b/workflow/rocoto/rocoto.py index c4240622d4..679c0952ed 100644 --- a/workflow/rocoto/rocoto.py +++ b/workflow/rocoto/rocoto.py @@ -10,62 +10,109 @@ Helper module to create tasks, metatasks, and dependencies for Rocoto ''' -__all__ = ['create_task', 'create_metatask', +__all__ = ['create_task', 'add_dependency', 'create_dependency', 'create_envar', 'create_entity', 'create_cycledef'] -def create_metatask(task_dict: Dict[str, Any], metatask_dict: Dict[str, Any]) -> List[str]: +def create_task(task_dict: Dict[str, Any]) -> List[str]: """ - create a Rocoto metatask given a dictionary containing task and metatask information - :param metatask_dict: metatask key-value parameters - :type metatask_dict: dict - :param task_dict: task key-value parameters - :type task_dict: dict - :return: Rocoto metatask - :rtype: list + Create XML for a rocoto task or metatask + + Creates the XML required to define a task and returns the lines + as a list of strings. Tasks can be nested to create metatasks by + defining a key 'task_dict' within the task_dict. When including + a nested task, you also need to provide a 'var_dict' key that + contains a dictionary of variables to loop over. + + All task dicts must include a 'task_name'. + + Innermost tasks (regular tasks) additionally require a 'resources' + key containing a dict of values defining the HPC settings. 
+ + Parameters + ---------- + task_dict: dict + Dictionary of task definitions + + Returns + ------- + str + Strings containing the XML code defining the task + + Raises + ------ + KeyError + If a required key is missing + """ - # Grab metatask info from the metatask_dict - metataskname = metatask_dict.get('metataskname', 'demometatask') - varname = metatask_dict.get('varname', 'demovar') - varval = metatask_dict.get('varval', 1) - vardict = metatask_dict.get('vardict', None) + inner_task_dict = task_dict.pop('task_dict', None) - strings = [f'\n', - '\n', - f'\t{str(varval)}\n'] + if inner_task_dict is None: + strings = _create_innermost_task(task_dict) + + else: + # There is a nested task_dict, so this is a metatask + metataskname = f"{task_dict.get('task_name', 'demometatask')}" + var_dict = task_dict.get('var_dict', None) - if vardict is not None: - for key in vardict.keys(): - value = str(vardict[key]) + strings = [f'\n', + '\n'] + + if var_dict is None: + msg = f'Task {metataskname} has a nested task dict, but has no var_dict' + raise KeyError(msg) + + for key in var_dict.keys(): + value = str(var_dict[key]) strings.append(f'\t{value}\n') - strings.append('\n') - tasklines = create_task(task_dict) - for tl in tasklines: - strings.append(f'{tl}') if tl == '\n' else strings.append(f'\t{tl}') - strings.append('\n') - strings.append('\n') - return strings + strings.append('\n') + task_dict.update(inner_task_dict) + tasklines = create_task(task_dict).splitlines(True) + for tl in tasklines: + strings.append(f'{tl}') if tl == '\n' else strings.append(f'\t{tl}') + strings.append('\n') + strings.append('\n') + return ''.join(strings) -def create_task(task_dict: Dict[str, Any]) -> List[str]: + +def _create_innermost_task(task_dict: Dict[str, Any]) -> List[str]: """ - create a Rocoto task given a dictionary containing task information - :param task_dict: task key-value parameters - :type task_dict: dict - :return: Rocoto task - :rtype: list + Create XML for a regular 
rocoto task + + Creates the XML required to define a task and returns the lines + as a list of strings. + + All task dicts must include a 'task_name' and a 'resources' + key containing a dict of values defining the HPC settings. + + Parameters + ---------- + task_dict: dict + Dictionary of task definitions + + Returns + ------- + List[str] + List of strings containing the XML code defining the task + + Raises + ------ + KeyError + If a required key is missing + """ # Grab task info from the task_names - taskname = task_dict.get('taskname', 'demotask') + taskname = task_dict.get('task_name', 'demotask') cycledef = task_dict.get('cycledef', 'democycle') maxtries = task_dict.get('maxtries', 3) final = task_dict.get('final', False) command = task_dict.get('command', 'sleep 10') - jobname = task_dict.get('jobname', 'demojob') + jobname = task_dict.get('job_name', 'demojob') resources_dict = task_dict['resources'] account = resources_dict.get('account', 'batch') queue = resources_dict.get('queue', 'debug') diff --git a/workflow/rocoto/tasks_emc.py b/workflow/rocoto/tasks_emc.py index 2d44c00d4d..1c79de0c19 100644 --- a/workflow/rocoto/tasks_emc.py +++ b/workflow/rocoto/tasks_emc.py @@ -5,7 +5,7 @@ import rocoto.rocoto as rocoto from wxflow import Template, TemplateConstants, to_timedelta -__all__ = ['Tasks', 'create_wf_task'] +__all__ = ['Tasks'] class Tasks: @@ -13,6 +13,7 @@ class Tasks: VALID_TASKS = ['aerosol_init', 'stage_ic', 'prep', 'anal', 'sfcanl', 'analcalc', 'analdiag', 'arch', "cleanup", 'prepatmiodaobs', 'atmanlinit', 'atmanlrun', 'atmanlfinal', + 'prepoceanobs', 'ocnanalprep', 'ocnanalbmat', 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy', 'earc', 'ecen', 'echgres', 'ediag', 'efcs', 'eobs', 'eomg', 'epos', 'esfc', 'eupd', @@ -20,7 +21,7 @@ class Tasks: 'aeroanlinit', 'aeroanlrun', 'aeroanlfinal', 'preplandobs', 'landanl', 'fcst', - 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', + 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', 'goesupp', 
'ocnpost', 'verfozn', 'verfrad', 'vminmon', 'metp', @@ -42,12 +43,15 @@ def __init__(self, app_config: AppConfig, cdump: str) -> None: # Save dict_configs and base in the internal state (never know where it may be needed) self._configs = self.app_config.configs self._base = self._configs['base'] + self.HOMEgfs = self._base['HOMEgfs'] + self.rotdir = self._base['ROTDIR'] + self.pslot = self._base['PSLOT'] self._base['cycle_interval'] = to_timedelta(f'{self._base["assim_freq"]}H') self.n_tiles = 6 # TODO - this needs to be elsewhere envar_dict = {'RUN_ENVIR': self._base.get('RUN_ENVIR', 'emc'), - 'HOMEgfs': self._base.get('HOMEgfs'), + 'HOMEgfs': self.HOMEgfs, 'EXPDIR': self._base.get('EXPDIR'), 'NET': self._base.get('NET'), 'CDUMP': self.cdump, @@ -197,34 +201,3 @@ def get_task(self, task_name, *args, **kwargs): raise AttributeError(f'"{task_name}" is not a valid task.\n' + 'Valid tasks are:\n' + f'{", ".join(Tasks.VALID_TASKS)}') - - -def create_wf_task(task_name, resources, - cdump='gdas', cycledef=None, envar=None, dependency=None, - metatask=None, varname=None, varval=None, vardict=None, - final=False, command=None): - tasknamestr = f'{cdump}{task_name}' - metatask_dict = None - if metatask is not None: - tasknamestr = f'{tasknamestr}#{varname}#' - metatask_dict = {'metataskname': f'{cdump}{metatask}', - 'varname': f'{varname}', - 'varval': f'{varval}', - 'vardict': vardict} - - cycledefstr = cdump.replace('enkf', '') if cycledef is None else cycledef - - task_dict = {'taskname': f'{tasknamestr}', - 'cycledef': f'{cycledefstr}', - 'maxtries': '&MAXTRIES;', - 'command': f'&JOBS_DIR;/{task_name}.sh' if command is None else command, - 'jobname': f'&PSLOT;_{tasknamestr}_@H', - 'resources': resources, - 'log': f'&ROTDIR;/logs/@Y@m@d@H/{tasknamestr}.log', - 'envars': envar, - 'dependency': dependency, - 'final': final} - - task = rocoto.create_task(task_dict) if metatask is None else rocoto.create_metatask(task_dict, metatask_dict) - - return ''.join(task) diff --git 
a/workflow/rocoto/tasks_gsl.py b/workflow/rocoto/tasks_gsl.py index 1dfcd07c91..371721bbb9 100644 --- a/workflow/rocoto/tasks_gsl.py +++ b/workflow/rocoto/tasks_gsl.py @@ -5,7 +5,7 @@ import rocoto.rocoto as rocoto from wxflow import Template, TemplateConstants, to_timedelta -__all__ = ['Tasks', 'create_wf_task'] +__all__ = ['Tasks'] class Tasks: @@ -13,6 +13,7 @@ class Tasks: VALID_TASKS = ['aerosol_init', 'stage_ic', 'prep', 'anal', 'sfcanl', 'analcalc', 'analdiag', 'arch', "cleanup", 'prepatmiodaobs', 'atmanlinit', 'atmanlrun', 'atmanlfinal', + 'prepoceanobs', 'ocnanalprep', 'ocnanalbmat', 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy', 'earc', 'ecen', 'echgres', 'ediag', 'efcs', 'eobs', 'eomg', 'epos', 'esfc', 'eupd', @@ -20,7 +21,7 @@ class Tasks: 'aeroanlinit', 'aeroanlrun', 'aeroanlfinal', 'preplandobs', 'landanl', 'fcst', - 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', + 'atmanlupp', 'atmanlprod', 'atmupp', 'atmprod', 'goesupp', 'ocnpost', 'verfozn', 'verfrad', 'vminmon', 'metp', @@ -42,12 +43,15 @@ def __init__(self, app_config: AppConfig, cdump: str) -> None: # Save dict_configs and base in the internal state (never know where it may be needed) self._configs = self.app_config.configs self._base = self._configs['base'] + self.HOMEgfs = self._base['HOMEgfs'] + self.rotdir = self._base['ROTDIR'] + self.pslot = self._base['PSLOT'] self._base['cycle_interval'] = to_timedelta(f'{self._base["assim_freq"]}H') self.n_tiles = 6 # TODO - this needs to be elsewhere envar_dict = {'RUN_ENVIR': self._base.get('RUN_ENVIR', 'emc'), - 'HOMEgfs': self._base.get('HOMEgfs'), + 'HOMEgfs': self.HOMEgfs, 'EXPDIR': self._base.get('EXPDIR'), 'ROTDIR': self._base.get('ROTDIR'), 'NET': self._base.get('NET'), @@ -198,34 +202,3 @@ def get_task(self, task_name, *args, **kwargs): raise AttributeError(f'"{task_name}" is not a valid task.\n' + 'Valid tasks are:\n' + f'{", ".join(Tasks.VALID_TASKS)}') - - -def create_wf_task(task_name, resources, - cdump='gdas', 
cycledef=None, envar=None, dependency=None, - metatask=None, varname=None, varval=None, vardict=None, - final=False, command=None): - tasknamestr = f'{cdump}{task_name}' - metatask_dict = None - if metatask is not None: - tasknamestr = f'{tasknamestr}#{varname}#' - metatask_dict = {'metataskname': f'{cdump}{metatask}', - 'varname': f'{varname}', - 'varval': f'{varval}', - 'vardict': vardict} - - cycledefstr = cdump.replace('enkf', '') if cycledef is None else cycledef - - task_dict = {'taskname': f'{tasknamestr}', - 'cycledef': f'{cycledefstr}', - 'maxtries': '&MAXTRIES;', - 'command': f'&JOBS_DIR;/{task_name}.sh' if command is None else command, - 'jobname': f'&PSLOT;_{tasknamestr}_@H', - 'resources': resources, - 'log': f'&ROTDIR;/logs/@Y@m@d@H/{tasknamestr}.log', - 'envars': envar, - 'dependency': dependency, - 'final': final} - - task = rocoto.create_task(task_dict) if metatask is None else rocoto.create_metatask(task_dict, metatask_dict) - - return ''.join(task) diff --git a/workflow/rt_mynn_hera.sh b/workflow/rt_mynn_hera.sh index 338d2cd312..d663affc41 100755 --- a/workflow/rt_mynn_hera.sh +++ b/workflow/rt_mynn_hera.sh @@ -11,7 +11,7 @@ RESDET=768 ## 96 192 384 768 ### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z -./setup_expt.py gfs forecast-only --pslot $PSLOT --gfs_cyc 1 \ - --idate $IDATE --edate $EDATE --resdet $RESDET \ - --comrot $COMROT --expdir $EXPDIR +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" diff --git a/workflow/rt_mynn_jet.sh b/workflow/rt_mynn_jet.sh index 9306b6c689..a3831d2971 100755 --- a/workflow/rt_mynn_jet.sh +++ b/workflow/rt_mynn_jet.sh @@ -1,3 +1,4 @@ +#!/bin/sh USER=Judy.K.Henderson GITDIR=/lfs1/BMC/gsd-fv3-test/HFIP/gsl_ufs_rt ## where your git checkout is located PLACEHOLDER, change this! 
COMROT=$GITDIR/FV3GFSrun ## default COMROT directory @@ -11,7 +12,7 @@ RESDET=768 ## 96 192 384 768 ### gfs_cyc 1 00Z only; gfs_cyc 2 00Z and 12Z -./setup_expt.py gfs forecast-only --pslot $PSLOT --gfs_cyc 1 \ - --idate $IDATE --edate $EDATE --resdet $RESDET \ - --comrot $COMROT --expdir $EXPDIR +./setup_expt.py gfs forecast-only --pslot "${PSLOT}" --gfs_cyc 1 \ + --idate "${IDATE}" --edate "${EDATE}" --resdetatmos "${RESDET}" \ + --comroot "${COMROT}" --expdir "${EXPDIR}" diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py index b1fa439052..7d7ac84aad 100755 --- a/workflow/setup_expt.py +++ b/workflow/setup_expt.py @@ -29,17 +29,17 @@ def makedirs_if_missing(dirname): os.makedirs(dirname) -def fill_COMROT(host, inputs): +def fill_ROTDIR(host, inputs): """ - Method to populate the COMROT for supported modes. + Method to populate the ROTDIR for supported modes. INPUTS: host: host object from class Host inputs: user inputs to setup_expt.py """ fill_modes = { - 'cycled': fill_COMROT_cycled, - 'forecast-only': fill_COMROT_forecasts + 'cycled': fill_ROTDIR_cycled, + 'forecast-only': fill_ROTDIR_forecasts } try: @@ -52,12 +52,12 @@ def fill_COMROT(host, inputs): return -def fill_COMROT_cycled(host, inputs): +def fill_ROTDIR_cycled(host, inputs): """ - Implementation of 'fill_COMROT' for cycled mode + Implementation of 'fill_ROTDIR' for cycled mode """ - comrot = os.path.join(inputs.comrot, inputs.pslot) + rotdir = os.path.join(inputs.comroot, inputs.pslot) do_ocean = do_ice = do_med = False @@ -136,17 +136,17 @@ def link_files_from_src_to_dst(src_dir, dst_dir): memdir = f'mem{ii:03d}' # Link atmospheric files if inputs.start in ['warm']: - dst_dir = os.path.join(comrot, previous_cycle_dir, memdir, dst_atm_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, memdir, dst_atm_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, memdir, src_atm_dir) elif inputs.start in ['cold']: - dst_dir = os.path.join(comrot, current_cycle_dir, memdir, dst_atm_dir) 
+ dst_dir = os.path.join(rotdir, current_cycle_dir, memdir, dst_atm_dir) src_dir = os.path.join(inputs.icsdir, current_cycle_dir, memdir, src_atm_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) # Link ocean files if do_ocean: - dst_dir = os.path.join(comrot, previous_cycle_dir, memdir, dst_ocn_rst_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, memdir, dst_ocn_rst_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, memdir, src_ocn_rst_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) @@ -154,20 +154,20 @@ def link_files_from_src_to_dst(src_dir, dst_dir): # First 1/2 cycle needs a MOM6 increment incfile = f'enkf{inputs.cdump}.t{idatestr[8:]}z.ocninc.nc' src_file = os.path.join(inputs.icsdir, current_cycle_dir, memdir, src_ocn_anl_dir, incfile) - dst_file = os.path.join(comrot, current_cycle_dir, memdir, dst_ocn_anl_dir, incfile) - makedirs_if_missing(os.path.join(comrot, current_cycle_dir, memdir, dst_ocn_anl_dir)) + dst_file = os.path.join(rotdir, current_cycle_dir, memdir, dst_ocn_anl_dir, incfile) + makedirs_if_missing(os.path.join(rotdir, current_cycle_dir, memdir, dst_ocn_anl_dir)) os.symlink(src_file, dst_file) # Link ice files if do_ice: - dst_dir = os.path.join(comrot, previous_cycle_dir, memdir, dst_ice_rst_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, memdir, dst_ice_rst_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, memdir, src_ice_rst_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) # Link mediator files if do_med: - dst_dir = os.path.join(comrot, previous_cycle_dir, memdir, dst_med_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, memdir, dst_med_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, memdir, src_med_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) @@ -178,10 +178,10 @@ def link_files_from_src_to_dst(src_dir, dst_dir): # Link atmospheric files if 
inputs.start in ['warm']: - dst_dir = os.path.join(comrot, previous_cycle_dir, dst_atm_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, dst_atm_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, src_atm_dir) elif inputs.start in ['cold']: - dst_dir = os.path.join(comrot, current_cycle_dir, dst_atm_dir) + dst_dir = os.path.join(rotdir, current_cycle_dir, dst_atm_dir) src_dir = os.path.join(inputs.icsdir, current_cycle_dir, src_atm_dir) makedirs_if_missing(dst_dir) @@ -189,7 +189,7 @@ def link_files_from_src_to_dst(src_dir, dst_dir): # Link ocean files if do_ocean: - dst_dir = os.path.join(comrot, previous_cycle_dir, dst_ocn_rst_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, dst_ocn_rst_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, src_ocn_rst_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) @@ -197,27 +197,27 @@ def link_files_from_src_to_dst(src_dir, dst_dir): # First 1/2 cycle needs a MOM6 increment incfile = f'{inputs.cdump}.t{idatestr[8:]}z.ocninc.nc' src_file = os.path.join(inputs.icsdir, current_cycle_dir, src_ocn_anl_dir, incfile) - dst_file = os.path.join(comrot, current_cycle_dir, dst_ocn_anl_dir, incfile) - makedirs_if_missing(os.path.join(comrot, current_cycle_dir, dst_ocn_anl_dir)) + dst_file = os.path.join(rotdir, current_cycle_dir, dst_ocn_anl_dir, incfile) + makedirs_if_missing(os.path.join(rotdir, current_cycle_dir, dst_ocn_anl_dir)) os.symlink(src_file, dst_file) # Link ice files if do_ice: - dst_dir = os.path.join(comrot, previous_cycle_dir, dst_ice_rst_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, dst_ice_rst_dir) src_dir = os.path.join(inputs.icsdir, previous_cycle_dir, src_ice_rst_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) # Link mediator files if do_med: - dst_dir = os.path.join(comrot, previous_cycle_dir, dst_med_dir) + dst_dir = os.path.join(rotdir, previous_cycle_dir, dst_med_dir) src_dir = 
os.path.join(inputs.icsdir, previous_cycle_dir, src_med_dir) makedirs_if_missing(dst_dir) link_files_from_src_to_dst(src_dir, dst_dir) # Link bias correction and radiance diagnostics files src_dir = os.path.join(inputs.icsdir, current_cycle_dir, src_atm_anl_dir) - dst_dir = os.path.join(comrot, current_cycle_dir, dst_atm_anl_dir) + dst_dir = os.path.join(rotdir, current_cycle_dir, dst_atm_anl_dir) makedirs_if_missing(dst_dir) for ftype in ['abias', 'abias_pc', 'abias_air', 'radstat']: fname = f'{inputs.cdump}.t{idatestr[8:]}z.{ftype}' @@ -228,9 +228,9 @@ def link_files_from_src_to_dst(src_dir, dst_dir): return -def fill_COMROT_forecasts(host, inputs): +def fill_ROTDIR_forecasts(host, inputs): """ - Implementation of 'fill_COMROT' for forecast-only mode + Implementation of 'fill_ROTDIR' for forecast-only mode """ print('forecast-only mode treats ICs differently and cannot be staged here') @@ -309,9 +309,10 @@ def edit_baseconfig(host, inputs, yaml_dict): "@PSLOT@": inputs.pslot, "@SDATE@": datetime_to_YMDH(inputs.idate), "@EDATE@": datetime_to_YMDH(inputs.edate), - "@CASECTL@": f'C{inputs.resdet}', + "@CASECTL@": f'C{inputs.resdetatmos}', + "@OCNRES@": f"{int(100.*inputs.resdetocean):03d}", "@EXPDIR@": inputs.expdir, - "@ROTDIR@": inputs.comrot, + "@COMROOT@": inputs.comroot, "@EXP_WARM_START@": is_warm_start, "@MODE@": inputs.mode, "@gfs_cyc@": inputs.gfs_cyc, @@ -322,7 +323,7 @@ def edit_baseconfig(host, inputs, yaml_dict): extend_dict = dict() if getattr(inputs, 'nens', 0) > 0: extend_dict = { - "@CASEENS@": f'C{inputs.resens}', + "@CASEENS@": f'C{inputs.resensatmos}', "@NMEM_ENS@": inputs.nens, } tmpl_dict = dict(tmpl_dict, **extend_dict) @@ -387,9 +388,11 @@ def input_args(*argv): def _common_args(parser): parser.add_argument('--pslot', help='parallel experiment name', type=str, required=False, default='test') - parser.add_argument('--resdet', help='resolution of the deterministic model forecast', + parser.add_argument('--resdetatmos', help='atmosphere 
resolution of the deterministic model forecast', type=int, required=False, default=384) - parser.add_argument('--comrot', help='full path to COMROT', + parser.add_argument('--resdetocean', help='ocean resolution of the deterministic model forecast', + type=float, required=False, default=0.0) # 0.0 (or lower) means determine from resdetatmos (limited combinations will be available) + parser.add_argument('--comroot', help='full path to COMROOT', type=str, required=False, default=os.getenv('HOME')) parser.add_argument('--expdir', help='full path to EXPDIR', type=str, required=False, default=os.getenv('HOME')) @@ -418,7 +421,7 @@ def _gfs_cycled_args(parser): return parser def _gfs_or_gefs_ensemble_args(parser): - parser.add_argument('--resens', help='resolution of the ensemble model forecast', + parser.add_argument('--resensatmos', help='atmosphere resolution of the ensemble model forecast', type=int, required=False, default=192) parser.add_argument('--nens', help='number of ensemble members', type=int, required=False, default=20) @@ -445,7 +448,7 @@ def _gefs_args(parser): description = """ Setup files and directories to start a GFS parallel.\n Create EXPDIR, copy config files.\n - Create COMROT experiment directory structure, + Create ROTDIR experiment directory structure, """ parser = ArgumentParser(description=description, @@ -512,7 +515,7 @@ def query_and_clean(dirname): def validate_user_request(host, inputs): supp_res = host.info['SUPPORTED_RESOLUTIONS'] machine = host.machine - for attr in ['resdet', 'resens']: + for attr in ['resdetatmos', 'resensatmos']: try: expt_res = f'C{getattr(inputs, attr)}' except AttributeError: @@ -521,6 +524,21 @@ def validate_user_request(host, inputs): raise NotImplementedError(f"Supported resolutions on {machine} are:\n{', '.join(supp_res)}") +def get_ocean_resolution(resdetatmos): + """ + Method to determine the ocean resolution based on the atmosphere resolution + Limited options are going to be available + """ + 
atmos_to_ocean_map = { + 1152: 0.25, 768: 0.25, 384: 0.25, + 192: 1.0, + 96: 5.0, 48: 5.0} + try: + return atmos_to_ocean_map[resdetatmos] + except KeyError: + raise KeyError(f"Ocean resolution for {resdetatmos} is not implemented") + + def main(*argv): user_inputs = input_args(*argv) @@ -528,15 +546,19 @@ def main(*argv): validate_user_request(host, user_inputs) - comrot = os.path.join(user_inputs.comrot, user_inputs.pslot) + # Determine ocean resolution if not provided + if user_inputs.resdetocean <= 0: + user_inputs.resdetocean = get_ocean_resolution(user_inputs.resdetatmos) + + rotdir = os.path.join(user_inputs.comroot, user_inputs.pslot) expdir = os.path.join(user_inputs.expdir, user_inputs.pslot) - create_comrot = query_and_clean(comrot) + create_rotdir = query_and_clean(rotdir) create_expdir = query_and_clean(expdir) - if create_comrot: - makedirs_if_missing(comrot) - fill_COMROT(host, user_inputs) + if create_rotdir: + makedirs_if_missing(rotdir) + fill_ROTDIR(host, user_inputs) if create_expdir: makedirs_if_missing(expdir)