diff --git a/CMakeLists.txt b/CMakeLists.txt index 8100dd0d2..ca0255482 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -63,6 +63,12 @@ if(BUILD_POSTEXEC) endif() endif() +### Switch RUNTIME DESTINATION DIR between bin and exec +set(exec_dir bin) +if(EMC_EXEC_DIR) + set(exec_dir exec) +endif() + add_subdirectory(sorc) add_subdirectory(parm) diff --git a/README.md b/README.md index 6f80fd158..4d403a302 100644 --- a/README.md +++ b/README.md @@ -110,9 +110,9 @@ Builds include: ``` mkdir build cd build -cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/install -make -make test +cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/install +(or cmake .. -DCMAKE_INSTALL_PREFIX=/path/to/install -DEMC_EXEC_DIR=ON) +make -j 4 make install ``` diff --git a/VERSION b/VERSION index 89acc9519..59a550906 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -10.0.11 +10.0.12 diff --git a/modulefiles/cheyenne b/modulefiles/cheyenne new file mode 100644 index 000000000..75ae507e5 --- /dev/null +++ b/modulefiles/cheyenne @@ -0,0 +1,40 @@ +#%Module# + +proc ModulesHelp { } { +puts stderr "Loads modules required for building upp" +} +module-whatis "Loads UPP prerequisites on Cheyenne for Intel 2021.2" + +module purge + +module load cmake/3.18.2 +module load ncarenv/1.3 +module load intel/2021.2 +module load mpt/2.22 +module load ncarcompilers/0.5.0 +module unload netcdf + +module use /glade/p/ral/jntp/GMTB/tools/hpc-stack-v1.2.0/modulefiles/stack +module load hpc/1.2.0 +module load hpc-intel/2021.2 +module load hpc-mpt/2.22 + +module load jasper/2.0.25 +module load zlib/1.2.11 +module load png/1.6.35 + +module load hdf5/1.10.6 +module load netcdf/4.7.4 + +module load bacio/2.4.1 +module load crtm/2.3.0 +module load g2/3.4.2 +module load g2tmpl/1.10.0 +module load ip/3.3.3 +module load nemsio/2.5.2 +module load sfcio/1.4.1 +module load sigio/2.3.2 +module load sp/2.3.3 +module load w3nco/2.4.1 +module load w3emc/2.7.3 +module load wrf_io/1.2.0 diff --git a/modulefiles/cheyenne_gnu b/modulefiles/cheyenne_gnu new file mode 100644 index 000000000..c7dbc8e18 --- /dev/null +++ b/modulefiles/cheyenne_gnu @@ -0,0 +1,41 @@ +#%Module# + +proc ModulesHelp { } { +puts stderr "Loads modules required for building upp" +} +module-whatis "Loads UPP prerequisites on Cheyenne for Intel 2021.2" + +module purge + +module load cmake/3.18.2 +module load ncarenv/1.3 +module load gnu/10.1.0 +module load mpt/2.22 +module load ncarcompilers/0.5.0 +module load python/3.7.9 +module unload netcdf + +module use /glade/p/ral/jntp/GMTB/tools/hpc-stack-v1.2.0/modulefiles/stack +module load hpc/1.2.0 +module load hpc-gnu/10.1.0 +module load hpc-mpt/2.22 + +module load jasper/2.0.25 +module load zlib/1.2.11 +module load png/1.6.35 + +module load hdf5/1.10.6 +module load netcdf/4.7.4 + +module load bacio/2.4.1 +module load crtm/2.3.0 +module load g2/3.4.2 +module load g2tmpl/1.10.0 +module load ip/3.3.3 +module load nemsio/2.5.2 +module load sfcio/1.4.1 +module load sigio/2.3.2 +module load sp/2.3.3 +module load w3nco/2.4.1 +module load w3emc/2.7.3 +module load wrf_io/1.2.0 diff --git a/sorc/ncep_post.fd/ALLOCATE_ALL.f b/sorc/ncep_post.fd/ALLOCATE_ALL.f index a9f19dfe4..5ae2b25a0 100644 --- a/sorc/ncep_post.fd/ALLOCATE_ALL.f +++ b/sorc/ncep_post.fd/ALLOCATE_ALL.f @@ -18,6 +18,7 @@ !! - 21-04-06 Wen Meng - Initializing all allocated arrays !! - 21-04-16 Wen Meng - Initializing aextc55 and extc55 as 0. These !! two arrays are involved in GSL visibility computation. +!! - 22-03-22 Wen Meng - Initializing pwat. !! !! OUTPUT FILES: !! - STDOUT - RUN TIME STANDARD OUT. 
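The next ALLOCATE_ALL.f hunk applies the pattern this changelog entry describes: the newly allocated pwat field, like the other 2-D arrays, is immediately filled with spval, the value UPP treats as its missing/undefined-point flag, so grid points that are never computed remain identifiable downstream. A minimal stand-alone Fortran sketch of that allocate-and-initialize pattern follows; the array bounds and the spval value are illustrative assumptions, not the actual UPP declarations.

```fortran
program init_spval_demo
  ! Sketch of the allocate-then-initialize-to-spval pattern used in
  ! ALLOCATE_ALL.f; the bounds and spval value below are assumed for
  ! illustration only, not taken from the UPP modules.
  implicit none
  real, parameter :: spval = 9.9e10          ! assumed missing-value flag
  integer, parameter :: ista_2l = 1, iend_2u = 4
  integer, parameter :: jsta_2l = 1, jend_2u = 3
  real, allocatable :: pwat(:,:)
  integer :: i, j

  allocate(pwat(ista_2l:iend_2u, jsta_2l:jend_2u))
  do j = jsta_2l, jend_2u
     do i = ista_2l, iend_2u
        pwat(i,j) = spval                    ! mark every point as undefined
     end do
  end do
  write(*,*) 'pwat initialized to spval at', size(pwat), 'points'
end program init_spval_demo
```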
@@ -970,6 +971,7 @@ SUBROUTINE ALLOCATE_ALL() allocate(tedir(ista_2l:iend_2u,jsta_2l:jend_2u)) allocate(twa(ista_2l:iend_2u,jsta_2l:jend_2u)) allocate(fdnsst(ista_2l:iend_2u,jsta_2l:jend_2u)) + allocate(pwat(ista_2l:iend_2u,jsta_2l:jend_2u)) !Initialization !$omp parallel do private(i,j) do j=jsta_2l,jend_2u @@ -1020,6 +1022,7 @@ SUBROUTINE ALLOCATE_ALL() tedir(i,j)=spval twa(i,j)=spval fdnsst(i,j)=spval + pwat(i,j)=spval enddo enddo ! diff --git a/sorc/ncep_post.fd/CALDRG.f b/sorc/ncep_post.fd/CALDRG.f index 3d3d09278..352d6cf59 100644 --- a/sorc/ncep_post.fd/CALDRG.f +++ b/sorc/ncep_post.fd/CALDRG.f @@ -2,17 +2,20 @@ !> @brief Subroutine that computes drag cofficient. ! !> This rountine computes a surface layer drag coefficient using -!> equation (7.4.1A) in "An introduction to boundary layer -!> meteorology" by Stull (1988, Kluwer Academic Publishers). +!> equation (7.4.1A) in ["An introduction to boundary layer +!> meteorology" by Stull (1988, Kluwer Academic +!> Publishers)](https://link.springer.com/book/10.1007/978-94-009-3027-8). !> -!> @param[out] DRAGCO surface layer drag coefficient +!> @param[out] DRAGCO surface layer drag coefficient. !> -!> Program history -!> - 93-09-01 Russ Treadon -!> - 98-06-15 T Black - Conversion from 1-D to 2-D -!> - 00-01-04 Jim Tuccillo - MPI version -!> - 02-01-15 Mike Baldwin - WRF version -!> - 05-02-22 H Chuang - Add WRF NMM components +!> ### Program History Log +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-09-01 | Russ Treadon | Initial +!> 1998-06-15 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI version +!> 2002-01-15 | Mike Baldwin | WRF version +!> 2005-02-22 | H Chuang | Add WRF NMM components !> !> @author Russ Treadon W/NP2 @date 1993-09-01 SUBROUTINE CALDRG(DRAGCO) diff --git a/sorc/ncep_post.fd/CALDWP.f b/sorc/ncep_post.fd/CALDWP.f index 96e097326..02f309a94 100644 --- a/sorc/ncep_post.fd/CALDWP.f +++ b/sorc/ncep_post.fd/CALDWP.f @@ -1,21 +1,21 @@ !> @file !> @brief Subroutine related to dewpoint temperature. ! -!> Computes dewpoint from P, T, and Q +!> Computes dewpoint from P, T, and Q. !> -!> @param[in] P1D Pressure (Pa) -!> @param[in] Q1D Specific humidity (kg/kg) -!> @param[in] T1D Temperature (K) -!> @param[out] TDWP Dewpoint temperature (K) +!> @param[in] P1D Pressure (Pa). +!> @param[in] Q1D Specific humidity (kg/kg). +!> @param[in] T1D Temperature (K). +!> @param[out] TDWP Dewpoint temperature (K). !> -!> Program history -!> - 92-12-22 Russ Treadon -!> - 93-10-04 Russ Treadon - Added check to bound dewpoint -!> temperature to not exceed the -!> ambient temperature. -!> - 98-06-08 T BLACK - Conversion from 1-D to 2-D -!> - 00-01-04 Jim Tuccillo - MPI version -!> - 21-07-23 Wen Meng - Retrict computation from undefined points +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1992-12-22 | Russ Treadon | Initial +!> 1993-10-04 | Russ Treadon | Added check to bound dewpoint temperature to not exceed the ambient temperature. +!> 1998-06-08 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI version +!> 2021-07-23 | Wen Meng | Retrict computation from undefined points !> !> @author Russ Treadon W/NP2 @date 1992-12-22 SUBROUTINE CALDWP(P1D,Q1D,TDWP,T1D) diff --git a/sorc/ncep_post.fd/CALGUST.f b/sorc/ncep_post.fd/CALGUST.f index 0ba8eb498..cef7b692e 100644 --- a/sorc/ncep_post.fd/CALGUST.f +++ b/sorc/ncep_post.fd/CALGUST.f @@ -1,47 +1,22 @@ !> @file -! . . . -!> SUBPROGRAM: CALGUST COMPUTE MAX WIND LEVEL -!! 
PRGRMMR: MANIKIN ORG: W/NP2 DATE: 97-03-04 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES SURFACE WIND GUST BY MIXING -!! DOWN MOMENTUM FROM THE LEVEL AT THE HEIGHT OF THE PBL -!! -!! -!! PROGRAM HISTORY LOG: -!! 03-10-15 GEOFF MANIKIN -!! 05-03-09 H CHUANG - WRF VERSION -!! 05-07-07 BINBIN ZHOU - ADD RSM -!! 15-03-11 S Moorthi - set sfcwind to spval if u10 and v10 are spvals -!! for A grid and set gust to just wind -!! (in GSM with nemsio, it appears u10 & v10 have spval) -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALGUST(GUST) -!! INPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT ARGUMENT LIST: -!! GUST - SPEED OF THE MAXIMUM SFC WIND GUST -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! H2V -!! -!! LIBRARY: -!! COMMON - -!! LOOPS -!! OPTIONS -!! MASKS -!! INDX -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes max wind level. +! +!> This routine computes surface wind gust by mixing +!> down momentum from the level at the height of the PBL. +!> +!> @param[out] GUST Speed of the maximum surface wind gust. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2003-10-15 | Geoff Manokin | Initial +!> 2005-03-09 | H Chuang | WRF Version +!> 2005-07-07 | Binbin Zhou | Add RSM +!> 2015-03-11 | S Moorthi | Set sfcwind to spval if u10 and v10 are spvals for A grid and set gust to just wind (in GSM with nemsio, it appears u10 & v10 have spval) +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Geoff Manikin W/NP2 @date 1997-03-04 + SUBROUTINE CALGUST(LPBL,ZPBL,GUST) ! diff --git a/sorc/ncep_post.fd/CALHEL.f b/sorc/ncep_post.fd/CALHEL.f index 8c11bc24c..a69c4260b 100644 --- a/sorc/ncep_post.fd/CALHEL.f +++ b/sorc/ncep_post.fd/CALHEL.f @@ -1,81 +1,44 @@ !> @file -! . . . -!> SUBPROGRAM: CALHEL COMPUTES STORM RELATIVE HELICITY -!! PRGRMMR: BALDWIN ORG: W/NP2 DATE: 94-08-22 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES ESTIMATED STORM MOTION AND -!! STORM-RELATIVE ENVIRONMENTAL HELICITY. -!! (DAVIES-JONES ET AL 1990) THE ALGORITHM PROCEEDS AS -!! FOLLOWS. -!! -!! THE STORM MOTION COMPUTATION NO LONGER EMPLOYS THE DAVIES AND -!! JOHNS (1993) METHOD WHICH DEFINED STORM MOTION AS 30 DEGREES TO -!! THE RIGHT OF THE 0-6 KM MEAN WIND AT 75% OF THE SPEED FOR MEAN -!! SPEEDS LESS THAN 15 M/S AND 20 DEGREES TO THE RIGHT FOR SPEEDS -!! GREATER THAN 15 M/S. INSTEAD, WE NOW USE THE DYNAMIC METHOD -!! (BUNKERS ET AL. 1998) WHICH HAS BEEN FOUND TO DO BETTER IN -!! CASES WITH 'NON-CLASSIC' HODOGRAPHS (SUCH AS NORTHWEST-FLOW -!! EVENTS) AND DO AS WELL OR BETTER THAN THE OLD METHOD IN MORE -!! CLASSIC SITUATIONS. -!! -!! PROGRAM HISTORY LOG: -!! 94-08-22 MICHAEL BALDWIN -!! 97-03-27 MICHAEL BALDWIN - SPEED UP CODE -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 00-01-10 G MANIKIN - CHANGED TO BUNKERS METHOD -!! 02-05-22 G MANIKIN - NOW ALLOW CHOICE OF COMPUTING -!! HELICITY OVER TWO DIFFERENT -!! (0-1 and 0-3 KM) DEPTHS -!! 03-03-25 G MANIKIN - MODIFIED CODE TO COMPUTE MEAN WINDS -!! USING ARITHMETIC AVERAGES INSTEAD OF -!! MASS WEIGHTING; DIFFERENCES ARE MINOR -!! BUT WANT TO BE CONSISTENT WITH THE -!! BUNKERS METHOD -!! 04-04-16 M PYLE - MINIMAL MODIFICATIONS, BUT PUT INTO -!! NMM WRFPOST CODE -!! 05=02-25 H CHUANG - ADD COMPUTATION FOR ARW A GRID -!! 05-07-07 BINBIN ZHOU - ADD RSM FOR A GRID -!! 19-10-30 Bo CUI - REMOVE "GOTO" STATEMENT -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! 
USAGE: CALHEL(UST,VST,HELI) -!! INPUT ARGUMENT LIST: -!! DPTH - DEPTH IN METERS OVER WHICH HELICITY SHOULD BE COMPUTED; -!! ALLOWS ONE TO DISTINGUISH 0-3 KM AND 0-1 KM VALUES -!! -!! OUTPUT ARGUMENT LIST: -!! UST - ESTIMATED U COMPONENT (M/S) OF STORM MOTION. -!! VST - ESTIMATED V COMPONENT (M/S) OF STORM MOTION. -!! HELI - STORM-RELATIVE HELICITY (M**2/S**2) -!! CRA -!! USHR1 - U COMPONENT (M/S) OF 0-1 KM SHEAR -!! VSHR1 - V COMPONENT (M/S) OF 0-1 KM SHEAR -!! USHR6 - U COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! VSHR6 - V COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! CRA - -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! -!! LIBRARY: -!! COMMON - VRBLS -!! LOOPS -!! PHYS -!! EXTRA -!! MASKS -!! OPTIONS -!! INDX -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : IBM SP -!! +!> @brief Subroutine that computes storm relative helicity. +! +!> This routine computes estimated storm motion and storm-relative +!> environmental helicity. (Davies-Jones et al 1990) the algorithm +!> processd as follows. +!> +!> The storm motion computation no longer employs the Davies and Johns (1993) +!> method which defined storm motion as 30 degress to the right of the 0-6 km +!> mean wind at 75% of the speed for mean speeds less than 15 m/s and 20 degrees +!> to the right for speeds greater than 15 m/s. Instead, we now use the dynamic +!> method (Bunkers et al. 1988) which has been found to do better in cases with +!> 'non-classic' hodographs (such as Northwest-flow events) and do as well or +!> better than the old method in more classic situations. +!> +!> @param[in] DPTH Depth in meters over whcih helicity should be computed; allows one to distinguish 0-3 km and 0-1 km values. +!> @param[out] UST Estimated U Component (m/s) Of Storm motion. +!> @param[out] VST Estimated V Component (m/s) Of Storm motion. +!> @param[out] HELI Storm-relative heliciry (m**2/s**2). +!> @param[out] USHR1 U Component (m/s) Of 0-1 km shear. +!> @param[out] VSHR1 V Component (m/s) Of 0-1 km shear. +!> @param[out] USHR6 U Component (m/s) Of 0-0.5 to 5.5-6.0 km shear. +!> @param[out] VSHR6 V Component (m/s) Of 0-0.5 to 5.5-6.0 km shear. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1994-08-22 | Michael Baldwin | Initial +!> 1997-03-27 | Michael Baldwin | Speed up code +!> 1998-06-15 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2000-01-10 | G Manikin | Changed to Bunkers method +!> 2002-05-22 | G Manikin | Now allow choice of computing helicity over two different (0-1 and 0-3 km) depths +!> 2003-03-25 | G Manikin | Modified code to compute mean winds using arithmetic averages instead of mass weighting; differences are minor but want to be consistent with the Bunkers method +!> 2004-04-16 | M Pyle | Minimal modifications but put into NMM WRFPOST code +!> 2005-02-25 | H Chuang | Add computation for ARW A grid +!> 2005-07-07 | Binbin Zhou | Add RSM for A grid +!> 2019-10-30 | Bo Cui | Remove "goto" statement +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Michael Baldwin W/NP2 @date 1994-08-22 SUBROUTINE CALHEL(DEPTH,UST,VST,HELI,USHR1,VSHR1,USHR6,VSHR6) ! diff --git a/sorc/ncep_post.fd/CALHEL2.f b/sorc/ncep_post.fd/CALHEL2.f index 183ebcc2a..2c1bb8460 100644 --- a/sorc/ncep_post.fd/CALHEL2.f +++ b/sorc/ncep_post.fd/CALHEL2.f @@ -1,85 +1,47 @@ !> @file -! . . . -!> SUBPROGRAM: CALHEL COMPUTES STORM RELATIVE HELICITY -!! PRGRMMR: BALDWIN ORG: W/NP2 DATE: 94-08-22 -!! -!! 
ABSTRACT: -!! THIS ROUTINE COMPUTES ESTIMATED STORM MOTION AND -!! STORM-RELATIVE ENVIRONMENTAL HELICITY. -!! (DAVIES-JONES ET AL 1990) THE ALGORITHM PROCEEDS AS -!! FOLLOWS. -!! -!! THE STORM MOTION COMPUTATION NO LONGER EMPLOYS THE DAVIES AND -!! JOHNS (1993) METHOD WHICH DEFINED STORM MOTION AS 30 DEGREES TO -!! THE RIGHT OF THE 0-6 KM MEAN WIND AT 75% OF THE SPEED FOR MEAN -!! SPEEDS LESS THAN 15 M/S AND 20 DEGREES TO THE RIGHT FOR SPEEDS -!! GREATER THAN 15 M/S. INSTEAD, WE NOW USE THE DYNAMIC METHOD -!! (BUNKERS ET AL. 1998) WHICH HAS BEEN FOUND TO DO BETTER IN -!! CASES WITH 'NON-CLASSIC' HODOGRAPHS (SUCH AS NORTHWEST-FLOW -!! EVENTS) AND DO AS WELL OR BETTER THAN THE OLD METHOD IN MORE -!! CLASSIC SITUATIONS. -!! -!! PROGRAM HISTORY LOG: -!! 94-08-22 MICHAEL BALDWIN -!! 97-03-27 MICHAEL BALDWIN - SPEED UP CODE -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 00-01-10 G MANIKIN - CHANGED TO BUNKERS METHOD -!! 02-05-22 G MANIKIN - NOW ALLOW CHOICE OF COMPUTING -!! HELICITY OVER TWO DIFFERENT -!! (0-1 and 0-3 KM) DEPTHS -!! 03-03-25 G MANIKIN - MODIFIED CODE TO COMPUTE MEAN WINDS -!! USING ARITHMETIC AVERAGES INSTEAD OF -!! MASS WEIGHTING; DIFFERENCES ARE MINOR -!! BUT WANT TO BE CONSISTENT WITH THE -!! BUNKERS METHOD -!! 04-04-16 M PYLE - MINIMAL MODIFICATIONS, BUT PUT INTO -!! NMM WRFPOST CODE -!! 05=02-25 H CHUANG - ADD COMPUTATION FOR ARW A GRID -!! 05-07-07 BINBIN ZHOU - ADD RSM FOR A GRID -!! 19-09-03 J MENG - MODIFIED TO COMPUTE EFFECTIVE HELICITY -!! AND CRITICAL ANGLE -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALHEL(UST,VST,HELI) -!! INPUT ARGUMENT LIST: -!! LLOW - LOWER BOUND CAPE>=100 AND CINS>=-250 -!! LUPP - UPPER BOUND CAPE< 100 OR CINS< -250 -!! DPTH - DEPTH IN METERS OVER WHICH HELICITY SHOULD BE COMPUTED; -!! ALLOWS ONE TO DISTINGUISH 0-3 KM AND 0-1 KM VALUES -!! -!! OUTPUT ARGUMENT LIST: -!! UST - ESTIMATED U COMPONENT (M/S) OF STORM MOTION. -!! VST - ESTIMATED V COMPONENT (M/S) OF STORM MOTION. -!! HELI - STORM-RELATIVE HELICITY (M**2/S**2) -!! CANGLE - CRITICAL ANGLE -!! CRA -!! USHR1 - U COMPONENT (M/S) OF 0-1 KM SHEAR -!! VSHR1 - V COMPONENT (M/S) OF 0-1 KM SHEAR -!! USHR6 - U COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! VSHR6 - V COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! CRA - -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! -!! LIBRARY: -!! COMMON - VRBLS -!! LOOPS -!! PHYS -!! EXTRA -!! MASKS -!! OPTIONS -!! INDX -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : IBM SP -!! +!> @brief Subroutine that computes storm relative helicity. +! +!> This routine computes estimated storm motion and storm-relative +!> environmental helicity. (Davies-Jones et al 1990) the algorithm +!> processd as follows. +!> +!> The storm motion computation no longer employs the Davies and Johns (1993) +!> method which defined storm motion as 30 degress to the right of the 0-6 km +!> mean wind at 75% of the speed for mean speeds less than 15 m/s and 20 degrees +!> to the right for speeds greater than 15 m/s. Instead, we now use the dynamic +!> method (Bunkers et al. 1988) which has been found to do better in cases with +!> 'non-classic' hodographs (such as Northwest-flow events) and do as well or +!> better than the old method in more classic situations. +!> +!> @param[in] LLOW Lower bound CAPE>=100 and CINS>=-250. +!> @param[in] LUPP Upper bound CAPE< 100 or CINS< -250; allows one to distinguish 0-3 km and 0-1 km values. 
+!> @param[in] DPTH Depth in meters over which helicity should be computed; allows one to distinguish 0-3 km and 0-1 km values. +!> @param[out] UST Estimated U component (m/s) of storm motion. +!> @param[out] VST Estimated V component (m/s) of storm motion. +!> @param[out] HELI Storm-relative helicity (m**2/s**2). +!> @param[out] CANGLE Critical angle. +!> @param[out] USHR1 U component (m/s) of 0-1 km shear. +!> @param[out] VSHR1 V component (m/s) of 0-1 km shear. +!> @param[out] USHR6 U component (m/s) of 0-0.5 to 5.5-6.0 km shear. +!> @param[out] VSHR6 V component (m/s) of 0-0.5 to 5.5-6.0 km shear. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1994-08-22 | Michael Baldwin | Initial +!> 1997-03-27 | Michael Baldwin | Speed up code +!> 1998-06-15 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2000-01-10 | G Manikin | Changed to Bunkers method +!> 2002-05-22 | G Manikin | Now allow choice of computing helicity over two different (0-1 and 0-3 km) depths +!> 2003-03-25 | G Manikin | Modified code to compute mean winds using arithmetic averages instead of mass weighting; differences are minor but want to be consistent with the Bunkers method +!> 2004-04-16 | M Pyle | Minimal modifications but put into NMM WRFPOST code +!> 2005-02-25 | H Chuang | Add computation for ARW A grid +!> 2005-07-07 | Binbin Zhou | Add RSM for A grid +!> 2019-09-03 | J Meng | Modified to compute effective helicity and critical angle +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Michael Baldwin W/NP2 @date 1994-08-22 SUBROUTINE CALHEL2(LLOW,LUPP,DEPTH,UST,VST,HELI,CANGLE) ! diff --git a/sorc/ncep_post.fd/CALHEL3.f b/sorc/ncep_post.fd/CALHEL3.f index 942011340..156911f17 100644 --- a/sorc/ncep_post.fd/CALHEL3.f +++ b/sorc/ncep_post.fd/CALHEL3.f @@ -1,84 +1,47 @@ !> @file -! . . . -!> SUBPROGRAM: CALHEL COMPUTES STORM RELATIVE HELICITY -!! PRGRMMR: BALDWIN ORG: W/NP2 DATE: 94-08-22 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES ESTIMATED STORM MOTION AND -!! STORM-RELATIVE ENVIRONMENTAL HELICITY. -!! (DAVIES-JONES ET AL 1990) THE ALGORITHM PROCEEDS AS -!! FOLLOWS. -!! -!! THE STORM MOTION COMPUTATION NO LONGER EMPLOYS THE DAVIES AND -!! JOHNS (1993) METHOD WHICH DEFINED STORM MOTION AS 30 DEGREES TO -!! THE RIGHT OF THE 0-6 KM MEAN WIND AT 75% OF THE SPEED FOR MEAN -!! SPEEDS LESS THAN 15 M/S AND 20 DEGREES TO THE RIGHT FOR SPEEDS -!! GREATER THAN 15 M/S. INSTEAD, WE NOW USE THE DYNAMIC METHOD -!! (BUNKERS ET AL. 1998) WHICH HAS BEEN FOUND TO DO BETTER IN -!! CASES WITH 'NON-CLASSIC' HODOGRAPHS (SUCH AS NORTHWEST-FLOW -!! EVENTS) AND DO AS WELL OR BETTER THAN THE OLD METHOD IN MORE -!! CLASSIC SITUATIONS. -!! -!! PROGRAM HISTORY LOG: -!! 94-08-22 MICHAEL BALDWIN -!! 97-03-27 MICHAEL BALDWIN - SPEED UP CODE -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 00-01-10 G MANIKIN - CHANGED TO BUNKERS METHOD -!! 02-05-22 G MANIKIN - NOW ALLOW CHOICE OF COMPUTING -!! HELICITY OVER TWO DIFFERENT -!! (0-1 and 0-3 KM) DEPTHS -!! 03-03-25 G MANIKIN - MODIFIED CODE TO COMPUTE MEAN WINDS -!! USING ARITHMETIC AVERAGES INSTEAD OF -!! MASS WEIGHTING; DIFFERENCES ARE MINOR -!! BUT WANT TO BE CONSISTENT WITH THE -!! BUNKERS METHOD -!! 04-04-16 M PYLE - MINIMAL MODIFICATIONS, BUT PUT INTO -!! NMM WRFPOST CODE -!! 05=02-25 H CHUANG - ADD COMPUTATION FOR ARW A GRID -!! 05-07-07 BINBIN ZHOU - ADD RSM FOR A GRID -!! 19-09-03 J MENG - MODIFIED TO COMPUTE EFFECTIVE HELICITY -!!
AND CRITICAL ANGLE -!! 21-03-15 E COLON - CALHEL2 MODIFIED TO COMPUTE EFFECTIVE -!! RATHER THAN FIXED LAYER HELICITY -!! 21-09-02 Bo Cui - Decompose UPP in X direction - -!! USAGE: CALHEL3(UST,VST,HELI) -!! INPUT ARGUMENT LIST: -!! LLOW - LOWER BOUND CAPE>=100 AND CINS>=-250 -!! LUPP - UPPER BOUND CAPE< 100 OR CINS< -250 -!! -!! OUTPUT ARGUMENT LIST: -!! UST - ESTIMATED U COMPONENT (M/S) OF STORM MOTION. -!! VST - ESTIMATED V COMPONENT (M/S) OF STORM MOTION. -!! HELI - STORM-RELATIVE HELICITY (M**2/S**2) -!! CRA -!! USHR1 - U COMPONENT (M/S) OF 0-1 KM SHEAR -!! VSHR1 - V COMPONENT (M/S) OF 0-1 KM SHEAR -!! USHR6 - U COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! VSHR6 - V COMPONENT (M/S) OF 0-0.5 to 5.5-6.0 KM SHEAR -!! CRA - -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! -!! LIBRARY: -!! COMMON - VRBLS -!! LOOPS -!! PHYS -!! EXTRA -!! MASKS -!! OPTIONS -!! INDX -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : IBM SP -!! +!> @brief Subroutine that computes storm relative helicity. +! +!> This routine computes estimated storm motion and storm-relative +!> environmental helicity (Davies-Jones et al. 1990). The algorithm +!> proceeds as follows. +!> +!> The storm motion computation no longer employs the Davies and Johns (1993) +!> method which defined storm motion as 30 degrees to the right of the 0-6 km +!> mean wind at 75% of the speed for mean speeds less than 15 m/s and 20 degrees +!> to the right for speeds greater than 15 m/s. Instead, we now use the dynamic +!> method (Bunkers et al. 1998) which has been found to do better in cases with +!> 'non-classic' hodographs (such as Northwest-flow events) and do as well or +!> better than the old method in more classic situations. +!> +!> @param[in] LLOW Lower bound CAPE>=100 and CINS>=-250. +!> @param[in] LUPP Upper bound CAPE< 100 or CINS< -250. +!> @param[out] UST Estimated U component (m/s) of storm motion. +!> @param[out] VST Estimated V component (m/s) of storm motion. +!> @param[out] HELI Storm-relative helicity (m**2/s**2). +!> @param[out] CANGLE Critical angle. +!> @param[out] USHR1 U component (m/s) of 0-1 km shear. +!> @param[out] VSHR1 V component (m/s) of 0-1 km shear. +!> @param[out] USHR6 U component (m/s) of 0-0.5 to 5.5-6.0 km shear. +!> @param[out] VSHR6 V component (m/s) of 0-0.5 to 5.5-6.0 km shear.
+!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1994-08-22 | Michael Baldwin | Initial +!> 1997-03-27 | Michael Baldwin | Speed up code +!> 1998-06-15 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2000-01-10 | G Manikin | Changed to Bunkers method +!> 2002-05-22 | G Manikin | Now allow choice of computing helicity over two different (0-1 and 0-3 km) depths +!> 2003-03-25 | G Manikin | Modified code to compute mean winds using arithmetic averages instead of mass weighting; differences are minor but want to be consistent with the Bunkers method +!> 2004-04-16 | M Pyle | Minimal modifications but put into NMM WRFPOST code +!> 2005-02-25 | H Chuang | Add computation for ARW A grid +!> 2005-07-07 | Binbin Zhou | Add RSM for A grid +!> 2019-09-03 | J Meng | Modified to compute effective helicity and critical angle +!> 2021-03-15 | E Colon | CALHEL2 modified to compute effective rather than fixed layer helicity +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Michael Baldwin W/NP2 @date 1994-08-22 SUBROUTINE CALHEL3(LLOW,LUPP,UST,VST,HELI) ! diff --git a/sorc/ncep_post.fd/CALLCL.f b/sorc/ncep_post.fd/CALLCL.f index 7652e6830..6cc377511 100644 --- a/sorc/ncep_post.fd/CALLCL.f +++ b/sorc/ncep_post.fd/CALLCL.f @@ -1,51 +1,33 @@ !> @file -! -!> SUBPROGRAM: CALLCL COMPUTES LCL HEIGHTS AND PRESSURE -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-03-15 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE LIFTING CONDENSATION LEVEL -!! PRESSURE AND HEIGHT IN EACH COLUMN AT MASS POINTS. -!! THE HEIGHT IS ABOVE GROUND LEVEL. THE EQUATION USED -!! TO FIND THE LCL PRESSURE IS FROM BOLTAN (1980,MWR) -!! AND IS THE SAME AS THAT USED IN SUBROUTINE CALCAPE. -!! -!! THIS ROUTINE IS A TEST VERSION. STILL TO BE RESOLVED -!! IS THE "BEST" PARCEL TO LIFT. -!! -!! PROGRAM HISTORY LOG: -!! 93-03-15 RUSS TREADON -!! 98-06-16 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 02-04-24 MIKE BALDWIN - WRF VERSION -!! 19-10-30 Bo CUI - REMOVE "GOTO" STATEMENT -!! 21-07-28 W Meng - Restriction compuatation from undefined grids -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALLCL(P1D,T1D,Q1D,PLCL,ZLCL) -!! INPUT ARGUMENT LIST: -!! P1D - ARRAY OF PARCEL PRESSURES (PA) -!! T1D - ARRAY OF PARCEL TEMPERATURES (K) -!! Q1D - ARRAY OF PARCEL SPECIFIC HUMIDITIES (KG/KG) -!! -!! OUTPUT ARGUMENT LIST: -!! PLCL - PARCEL PRESSURE AT LCL (PA) -!! ZLCL - PARCEL AGL HEIGHT AT LCL (M) -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - LOOPS -!! OPTIONS -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes LCL heights and pressure. +!> +!> This routine computes the lifting condensation level +!> pressure and height in each column at mass points. +!> The height is above ground level. The equation used +!> to find the LCL pressure is from Boltan (1980, MWR) +!> and is the same as that used in subroutine CALCAPE. +!> +!> This is a test version. Still to be resolved +!> is the "best" parcel to lift. +!> +!> @param[in] P1D Array of parcel pressures (Pa). +!> @param[in] T1D Array of parcel temperatures (K). +!> @param[in] Q1D Array of parcel specific humidities (kg/kg). +!> @param[out] PLCL Parcel Pressure at LCL (Pa). +!> @param[out] ZLCL Parcel AGL height at LCL (m). 
+!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-03-15 | Russ Treadon | Initial +!> 1998-06-16 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-04-24 | Mike Baldwin | WRF Version +!> 2019-10-30 | Bo Cui | Remove "GOTO" Statement +!> 2021-07-28 | W Meng | Restriction compuatation from undefined grids +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1993-03-15 SUBROUTINE CALLCL(P1D,T1D,Q1D,PLCL,ZLCL) ! diff --git a/sorc/ncep_post.fd/CALMCVG.f b/sorc/ncep_post.fd/CALMCVG.f index d2ec706e3..f61cfe7a1 100644 --- a/sorc/ncep_post.fd/CALMCVG.f +++ b/sorc/ncep_post.fd/CALMCVG.f @@ -1,55 +1,37 @@ !> @file -! -!> SUBPROGRAM: CALMCVG COMPUTES MOISTURE CONVERGENCE -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-01-22 -!! -!! ABSTRACT: -!! GIVEN SPECIFIC HUMIDITY, Q, AND THE U-V WIND COMPONENTS -!! THIS ROUTINE EVALUATES THE VECTOR OPERATION, -!! DEL DOT (Q*VEC) -!! WHERE, -!! DEL IS THE VECTOR GRADIENT OPERATOR, -!! DOT IS THE STANDARD DOT PRODUCT OPERATOR, AND -!! VEC IS THE VECTOR WIND. -!! MINUS ONE TIMES THE RESULTING SCALAR FIELD IS THE -!! MOISTURE CONVERGENCE WHICH IS RETURNED BY THIS ROUTINE. -!! -!! PROGRAM HISTORY LOG: -!! 93-01-22 RUSS TREADON -!! 98-06-08 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 02-04-23 MIKE BALDWIN - WRF C-GRID VERSION -!! 05-07-07 BINBIN ZHOU - ADD RSM A GRID -!! 06-04-25 H CHUANG - BUG FIXES TO CORECTLY COMPUTE MC AT BOUNDARIES -!! 21-04-01 J MENG - COMPUTATION ON DEFINED POINTS ONLY -!! 21-09-02 B CUI - REPLACE EXCH_F to EXCH -!! 21-09-30 J MENG - 2D DECOMPOSITION -!! -!! USAGE: CALL CALMCVG(Q1D,U1D,V1D,QCNVG) -!! INPUT ARGUMENT LIST: -!! Q1D - SPECIFIC HUMIDITY AT P-POINTS (KG/KG) -!! U1D - U WIND COMPONENT (M/S) AT P-POINTS -!! V1D - V WIND COMPONENT (M/S) AT P-POINTS -!! -!! OUTPUT ARGUMENT LIST: -!! QCNVG - MOISTURE CONVERGENCE (1/S) AT P-POINTS -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - MASKS -!! DYNAM -!! OPTIONS -!! INDX -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes moisture convergence. +!> +!>
+!> Given specific humidity, Q, and the U-V wind components
+!> this routine evaluates the vector operation,
+!>                  DEL DOT (Q*VEC)
+!> where,
+!>    DEL is the vector gradient operator,
+!>    DOT is the standard dot product operator, and
+!>    VEC is the vector wind.
+!> Minus one times the resulting scalar field is the 
+!> moisture convergence which is returned by this routine.
+!>
+!> +!> @param[in] Q1D - Specific humidity at P-points (kg/kg). +!> @param[in] U1D - U wind component (m/s) at P-points. +!> @param[in] V1D - V wind component (m/s) at P-points. +!> @param[out] QCNVG - Moisture convergence (1/s) at P-points. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-01-22 | Russ Treadon | Initial +!> 1998-06-08 | T Black | Conversion From 1-D To 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-04-23 | Mike Baldwin | WRF C-Grid Version +!> 2005-07-07 | Binbin Zhou | Add RSM A Grid +!> 2006-04-25 | H Chuang | Bug fixes to correctly compute MC at boundaries +!> 2021-04-01 | J Meng | Computation on defined points only +!> 2021-09-02 | B CUI | REPLACE EXCH_F to EXCH +!> 2021-09-30 | J MENG | 2D DECOMPOSITION +!> +!> @author Russ Treadon W/NP2 @date 1993-01-22 SUBROUTINE CALMCVG(Q1D,U1D,V1D,QCNVG) ! diff --git a/sorc/ncep_post.fd/CALMICT.f b/sorc/ncep_post.fd/CALMICT.f index e4998cc32..9bd053a8d 100644 --- a/sorc/ncep_post.fd/CALMICT.f +++ b/sorc/ncep_post.fd/CALMICT.f @@ -1,59 +1,38 @@ !> @file -! . . . -!> SUBPROGRAM: CALMIC COMPUTES HYDROMETEORS -!! PRGRMMR: JIN ORG: W/NP2 DATE: 01-08-14 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE MIXING RATIOS OF CLOUD WATER, -!! CLOUD ICE, RAIN, AND SNOW. THE CODE IS BASED ON SUBROUTINES -!! GSMDRIVE & GSMCOLUMN IN THE NMM MODEL. -!! -!! PROGRAM HISTORY LOG: -!! 01-08-14 YI JIN -!! 02-02-11 Brad Ferrier - Minor changes for consistency w/ NMM model -!! 04-11-10 Brad Ferrier - Removed cloud fraction algorithm -!! 04-11-17 H CHUANG - WRF VERSION -!! 14-03-11 B Ferrier - Created new & old versions of this subroutine -!! to process new & old versions of the microphysics -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALMICT_new(P1D,T1D,Q1D,C1D,FI1D,FR1D,FS1D,CUREFL -!! &, QW1,QI1,QR1,QS1,DBZ1,DBZR1,DBZI1,DBZC1) -!! INPUT ARGUMENT LIST: -!! P1D - PRESSURE (PA) -!! T1D - TEMPERATURE (K) -!! Q1D - SPECIFIC HUMIDITY (KG/KG) -!! C1D - TOTAL CONDENSATE (CWM, KG/KG) -!! FI1D - F_ice (fraction of condensate in form of ice) -!! FR1D - F_rain (fraction of liquid water in form of rain) -!! FS1D - F_RimeF ("Rime Factor", ratio of total ice growth -!! to deposition growth) -!! CUREFL - Radar reflectivity contribution from convection (mm**6/m**3) -!! -!! OUTPUT ARGUMENT LIST: -!! QW1 - CLOUD WATER MIXING RATIO (KG/KG) -!! QI1 - CLOUD ICE MIXING RATIO (KG/KG) -!! QR1 - RAIN MIXING RATIO (KG/KG) -!! QS1 - "SNOW" (precipitation ice) MIXING RATIO (KG/KG) -!! DBZ1 - Equivalent radar reflectivity factor in dBZ; i.e., 10*LOG10(Z) -!! DBZR - Equivalent radar reflectivity factor from rain in dBZ -!! DBZI - Equivalent radar reflectivity factor from ice (all forms) in dBZ -!! DBZC - Equivalent radar reflectivity factor from parameterized convection in dBZ -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! FUNCTIONS: -!! FPVS -!! UTILITIES: -!! LIBRARY: -!! NONE -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : IBM SP -!! +!> @brief Subroutine that computes hydrometeors. +!> +!> This routin computes the mixing ratios of cloud water, +!> cloud ice, rain, and snow. The code is based on subroutines +!> GSMDRIVE and GSMCOLUMN in the NMM model. +!> +!> @param[in] P1D Pressure (Pa). +!> @param[in] T1D Temperature (K). +!> @param[in] Q1D Specific humidity (kg/kg). +!> @param[in] C1D Total condensate (CWM, kg/kg). +!> @param[in] FI1D F_ice (fraction of condensate in form of ice). +!> @param[in] FR1D F_rain (fraction of liquid water in form of rain). 
+!> @param[in] FS1D F_RimeF ("Rime Factor", ratio of total ice growth to deposition growth). +!> @param[in] CUREFL Radar reflectivity contribution from convection (mm**6/m**3). +!> @param[out] QW1 Cloud water mixing ratio (kg/kg). +!> @param[out] QI1 Cloud ice mixing ratio (kg/kg). +!> @param[out] QR1 Rain mixing ratio (kg/kg). +!> @param[out] QS1 "Snow" (precipitation ice) mixing ratio (kg/kg). +!> @param[out] DBZ1 Equivalent radar reflectivity factor in dBZ; i.e., 10*LOG10(Z). +!> @param[out] DBZR Equivalent radar reflectivity factor from rain in dBZ. +!> @param[out] DBZI Equivalent radar reflectivity factor from ice (all forms) in dBZ. +!> @param[out] DBZC Equivalent radar reflectivity factor from parameterized convection in dBZ. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2001-08-14 | Yi Jin | Initial +!> 2002-02-11 | Brad Ferrier | Minor changes for consistency w/ NMM model +!> 2004-11-10 | Brad Ferrier | Removed cloud fraction algorithm +!> 2004-11-17 | H Chuang | WRF VERSION +!> 2014-03-11 | Brad Ferrier | Created new & old versions of this subroutine to process new & old versions of the microphysics +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Yi Jin W/NP2 @date 2001-08-14 SUBROUTINE CALMICT_new(P1D,T1D,Q1D,C1D,FI1D,FR1D,FS1D,CUREFL, & QW1,QI1,QR1,QS1,DBZ1,DBZR1,DBZI1,DBZC1,NLICE1,NRAIN1) @@ -322,66 +301,39 @@ SUBROUTINE CALMICT_new(P1D,T1D,Q1D,C1D,FI1D,FR1D,FS1D,CUREFL, & ! SUBROUTINE CALMICT_old(P1D,T1D,Q1D,C1D,FI1D,FR1D,FS1D,CUREFL, & QW1,QI1,QR1,QS1,DBZ1,DBZR1,DBZI1,DBZC1,NLICE1,NRAIN1) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALMICT_old COMPUTES HYDROMETEORS FROM THE OLDER VERSION -! OF THE MICROPHYSICS -! -! PRGRMMR: JIN ORG: W/NP2 DATE: 01-08-14 -! -! ABSTRACT: -! THIS ROUTINE COMPUTES THE MIXING RATIOS OF CLOUD WATER, CLOUD ICE, -! RAIN, AND SNOW. THE CODE IS BASED ON OPTION MP_PHYSICS==95 IN THE -! WRF NAMELIST AND OPTION MICRO='fer' in NMMB CONFIGURE FILES. -! -! PROGRAM HISTORY LOG: -! 01-08-14 YI JIN -! 02-02-11 Brad Ferrier - Minor changes for consistency w/ NMM model -! 04-11-10 Brad Ferrier - Removed cloud fraction algorithm -! 04-11-17 H CHUANG - WRF VERSION -! 14-03-11 B Ferrier - Created new & old versions of this subroutine -! to process new & old versions of the microphysics -! -! USAGE: CALL CALMICT_old(P1D,T1D,Q1D,C1D,FI1D,FR1D,FS1D,CUREFL -! &, QW1,QI1,QR1,QS1,DBZ1,DBZR1,DBZI1,DBZC1) -! -! INPUT ARGUMENT LIST: -! P1D - PRESSURE (PA) -! T1D - TEMPERATURE (K) -! Q1D - SPECIFIC HUMIDITY (KG/KG) -! C1D - TOTAL CONDENSATE (CWM, KG/KG) -! FI1D - F_ice (fraction of condensate in form of ice) -! FR1D - F_rain (fraction of liquid water in form of rain) -! FS1D - F_RimeF ("Rime Factor", ratio of total ice growth -! to deposition growth) -! CUREFL - Radar reflectivity contribution from convection (mm**6/m**3) -! -! OUTPUT ARGUMENT LIST: -! QW1 - CLOUD WATER MIXING RATIO (KG/KG) -! QI1 - CLOUD ICE MIXING RATIO (KG/KG) -! QR1 - RAIN MIXING RATIO (KG/KG) -! QS1 - "SNOW" (precipitation ice) MIXING RATIO (KG/KG) -! DBZ1 - Equivalent radar reflectivity factor in dBZ; i.e., 10*LOG10(Z) -! DBZR - Equivalent radar reflectivity factor from rain in dBZ -! DBZI - Equivalent radar reflectivity factor from ice (all forms) in dBZ -! DBZC - Equivalent radar reflectivity factor from parameterized convection -! in dBZ -! -! OUTPUT FILES: -! NONE -! -! SUBPROGRAMS CALLED: -! FUNCTIONS: -! FPVS -! UTILITIES: -! LIBRARY: -! NONE -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN -! 
MACHINE : IBM SP -!$$$ -! +!> CALMICT_old computes hydrometeors from the older version of the microphysics. +!> +!> This routin computes the mixing ratios of cloud water, cloud ice, +!> rain, and snow. The code is based on option MP_PHYSICS==95 in the +!> WRF namelist and option MICRO='fer' in NMMB configure files. +!> +!> @param[in] P1D Pressure (Pa). +!> @param[in] T1D Temperature (K). +!> @param[in] Q1D Specific humidity (kg/kg). +!> @param[in] C1D Total condensate (CWM, kg/kg). +!> @param[in] FI1D F_ice (fraction of condensate in form of ice). +!> @param[in] FR1D F_rain (fraction of liquid water in form of rain). +!> @param[in] FS1D F_RimeF ("Rime Factor", ratio of total ice growth to deposition growth). +!> @param[in] CUREFL Radar reflectivity contribution from convection (mm**6/m**3). +!> @param[out] QW1 Cloud water mixing ratio (kg/kg). +!> @param[out] QI1 Cloud ice mixing ratio (kg/kg). +!> @param[out] QR1 Rain mixing ratio (kg/kg). +!> @param[out] QS1 "Snow" (precipitation ice) mixing ratio (kg/kg). +!> @param[out] DBZ1 Equivalent radar reflectivity factor in dBZ; i.e., 10*LOG10(Z). +!> @param[out] DBZR Equivalent radar reflectivity factor from rain in dBZ. +!> @param[out] DBZI Equivalent radar reflectivity factor from ice (all forms) in dBZ. +!> @param[out] DBZC Equivalent radar reflectivity factor from parameterized convection in dBZ. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2001-08-14 | Yi Jin | Initial +!> 2002-02-11 | Brad Ferrier | Minor changes for consistency w/ NMM model +!> 2004-11-10 | Brad Ferrier | Removed cloud fraction algorithm +!> 2004-11-17 | H Chuang | WRF VERSION +!> 2014-03-11 | Brad Ferrier | Created new & old versions of this subroutine to process new & old versions of the microphysics +!> +!> @author Yi Jin W/NP2 @date 2001-08-14 use params_mod, only: dbzmin, epsq, tfrz, eps, rd, d608, oneps, nlimin use ctlblk_mod, only: jsta, jend, jsta_2l, jend_2u, im, & ista, iend, ista_2l, iend_2u diff --git a/sorc/ncep_post.fd/CALPBL.f b/sorc/ncep_post.fd/CALPBL.f index b3c6e0d20..015f4cd10 100644 --- a/sorc/ncep_post.fd/CALPBL.f +++ b/sorc/ncep_post.fd/CALPBL.f @@ -1,35 +1,18 @@ !> @file -! -!> SUBPROGRAM: CALPBL COMPUTES PBL HEIGHT BASED ON BULK RCH NUMBER -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE BULK RICHARDSON NUMBER -!! AND PBL HEIGHT ABOVE SURFACE -!! -!! PROGRAM HISTORY LOG: -!! 06-05-04 M TSIDULKO -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALPBL(PBLRI) -!! INPUT ARGUMENT LIST: -!! -!! OUTPUT ARGUMENT LIST: -!! PBLRI - PBL HEIGHT ABOVE GROUND -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - -!! CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : -!! +!> @brief Subroutine that computes PBL height based on bulk RCH number. +!> +!> This routine computes the bulk Richardson number +!> and PBL height above surface. +!> +!> @param[out] PBLRI PBL height above ground. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2006-05-04 | M Tsidulko | Initial +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author M Tsidulko @date 2006-05-04 SUBROUTINE CALPBL(PBLRI) ! diff --git a/sorc/ncep_post.fd/CALPBLREGIME.f b/sorc/ncep_post.fd/CALPBLREGIME.f index 808bd274d..72c59616f 100644 --- a/sorc/ncep_post.fd/CALPBLREGIME.f +++ b/sorc/ncep_post.fd/CALPBLREGIME.f @@ -1,48 +1,30 @@ !> @file -! . . . 
-!> SUBPROGRAM: CALPBL COMPUTES PBL HEIGHT BASED ON BULK RCH NUMBER -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE BULK RICHARDSON NUMBER BASED ON ALGORITHMS -!! FROM WRF SURFACE LAYER AND THEN DERIVE PBL REGIME AS FOLLOWS: -!! 1. BR >= 0.2; -!! REPRESENTS NIGHTTIME STABLE CONDITIONS (REGIME=1), -!! -!! 2. BR < 0.2 .AND. BR > 0.0; -!! REPRESENTS DAMPED MECHANICAL TURBULENT CONDITIONS -!! (REGIME=2), -!! -!! 3. BR == 0.0 -!! REPRESENTS FORCED CONVECTION CONDITIONS (REGIME=3), -!! -!! 4. BR < 0.0 -!! REPRESENTS FREE CONVECTION CONDITIONS (REGIME=4). -!! . -!! -!! PROGRAM HISTORY LOG: -!! 07-04-27 H CHUANG -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALPBLREGIME(PBLREGIME) -!! INPUT ARGUMENT LIST: -!! -!! OUTPUT ARGUMENT LIST: -!! PBLRI - PBL HEIGHT ABOVE GROUND -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - -!! CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : -!! +!> @brief Subroutine that computes PBL height based on bulk RCH number. +!> +!> This routine computes the bulk Richardson number based on algorithms +!> from WRF surface layer and then derive PBL regime as follows: +!> 1. BR >= 0.2; +!> Represents nighttime stable conditions (Regime=1), +!> +!> 2. BR < 0.2 .AND. BR > 0.0; +!> Represents damped mechanical turbulent conditions +!> (Regime=2), +!> +!> 3. BR == 0.0 +!> Represents forced convection conditions (Regime=3), +!> +!> 4. BR < 0.0 +!> Represnets free convection conditions (Regime=4). +!> +!> @param[out] PBLRI PBL Height above ground. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2007-04-27 | H Chuang | Initial +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author H Chuang @date 2007-04-27 SUBROUTINE CALPBLREGIME(PBLREGIME) ! diff --git a/sorc/ncep_post.fd/CALPOT.f b/sorc/ncep_post.fd/CALPOT.f index c8d0885d4..ec5cd58c7 100644 --- a/sorc/ncep_post.fd/CALPOT.f +++ b/sorc/ncep_post.fd/CALPOT.f @@ -1,40 +1,23 @@ !> @file -! -!> SUBPROGRAM: CALPOT COMPUTES POTENTIAL TEMPERATURE -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-24 -!! -!! ABSTRACT: -!! GIVEN PRESSURE AND TEMPERATURE THIS ROUTINE RETURNS -!! THE POTENTIAL TEMPERATURE. -!! -!! PROGRAM HISTORY LOG: -!! 92-12-24 RUSS TREADON -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 02-04-24 MIKE BALDWIN - WRF VERSION -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALPOT(P1D,T1D,THETA) -!! INPUT ARGUMENT LIST: -!! P1D - PRESSURE (PA) -!! T1D - TEMPERATURE (K) -!! -!! OUTPUT ARGUMENT LIST: -!! THETA - POTENTIAL TEMPERATURE (K) -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! NONE -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN 90 -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes potential temperature. +!> +!> Given pressure and temperature this routine returns +!> the potential temperature. +!> +!> @param[in] P1D pressures (Pa). +!> @param[in] T1D temperatures (K). +!> @param[out] THETA potential temperatures (K). +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1992-12-24 | Russ Treadon | Initial +!> 1998-06-15 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-04-24 | Mike Baldwin | WRF Version +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1992-12-24 SUBROUTINE CALPOT(P1D,T1D,THETA) ! 
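The CALPOT header above names the inputs and output but not the relation being evaluated. For reference, a potential-temperature routine of this kind applies the Poisson equation theta = T1D*(P0/P1D)**(Rd/Cp); the minimal sketch below uses textbook dry-air constants and a 1000 hPa reference pressure as assumptions rather than the exact values CALPOT takes from the UPP parameter modules.

```fortran
program theta_demo
  ! Sketch of the potential-temperature relation documented for CALPOT:
  !   theta = T * (p0/p)**(Rd/cp)
  ! Constants are standard dry-air values, assumed rather than read from
  ! the UPP params_mod module.
  implicit none
  real, parameter :: rd = 287.04     ! dry-air gas constant (J kg-1 K-1)
  real, parameter :: cp = 1004.6     ! specific heat at constant pressure (J kg-1 K-1)
  real, parameter :: p0 = 100000.0   ! reference pressure (Pa)
  real :: p1d, t1d, theta

  p1d = 85000.0                      ! 850 hPa
  t1d = 280.0                        ! K
  theta = t1d * (p0/p1d)**(rd/cp)
  write(*,'(a,f7.2,a)') 'theta = ', theta, ' K'   ! roughly 293 K for this input
end program theta_demo
```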
diff --git a/sorc/ncep_post.fd/CALPW.f b/sorc/ncep_post.fd/CALPW.f index a15c067fb..6db279e12 100644 --- a/sorc/ncep_post.fd/CALPW.f +++ b/sorc/ncep_post.fd/CALPW.f @@ -1,62 +1,43 @@ !> @file -! . . . -!> SUBPROGRAM: CALPW COMPUTES -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-24 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES PRECIPITABLE WATER IN A COLUMN -!! EXTENDING FROM THE FIRST ATMOSPHERIC ETA LAYER TO THE -!! MODEL TOP. THE DEFINITION USED IS -!! TOP -!! PRECIPITABLE WATER = SUM (Q+CLDW) DP*HTM/G -!! BOT -!! WHERE, -!! BOT IS THE FIRST ETA LAYER, -!! TOP IS THE MODEL TOP, -!! Q IS THE SPECIFIC HUMIDITY (KG/KG) IN THE LAYER -!! CLDW IS THE CLOUD WATER (KG/KG) IN THE LAYER -!! DP (Pa) IS THE LAYER THICKNESS. -!! HTM IS THE HEIGHT MASK AT THAT LAYER (=0 IF BELOW GROUND) -!! G IS THE GRAVITATIONAL CONSTANT -!! -!! PROGRAM HISTORY LOG: -!! 92-12-24 RUSS TREADON -!! 96-03-04 MIKE BALDWIN - ADD CLOUD WATER AND SPEED UP CODE -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 02-06-19 MIKE BALDWIN - WRF VERSION -!! 04-12-30 H CHUANG - UPDATE TO CALCULATE TOTAL COLUMN FOR OTHER -!! HYDROMETEORS -!! 14-11-12 SARAH LU - UPDATE TO CALCULATE AEROSOL OPTICAL DEPTH -!! 15-07-02 SARAH LU - UPDATE TO CALCULATE SCATTERING AEROSOL -!! OPTICAL DEPTH (18) -!! 15-07-04 SARAH LU - CORRECT PW INTEGRATION FOR AOD (17) -!! 15-07-10 SARAH LU - UPDATE TO CALCULATE ASYMETRY PARAMETER -!! 19-07-25 Li(Kate) Zhang - MERGE SARHA LU's update for FV3-Chem -!! 20-11-10 JESSE MENG - USE UPP_PHYSICS MODULE -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALPW(PW) -!! INPUT ARGUMENT LIST: -!! PW - ARRAY OF PRECIPITABLE WATER. -!! -!! OUTPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - LOOPS -!! MASKS -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes precipitable water. +!> +!>
+!> This routine computes precipitable water in a column
+!> extending from the first atmospheric ETA layer to the
+!> model top. The definition used is
+!>                      TOP
+!> precipitable water = sum (Q+CLDW) DP*HTM/G
+!>                      BOT
+!> where,
+!> BOT is the first ETA layer,
+!> TOP is the model top,
+!> Q is the specific humidity (kg/kg) in the layer
+!> CLDW is the cloud water (kg/kg) in the layer
+!> DP (Pa) is the layer thickness.
+!> HTM is the height mask at that layer (=0 if below ground)
+!> G is the gravitational acceleration constant.
+!>
+!> +!> @param[in] PW Array of precipitable water. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1992-12-24 | Russ Treadon | Initial +!> 1996-03-04 | Mike Baldwin | Add cloud water and speed up code +!> 1998-06-15 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-06-19 | Mike Baldwin | WRF Version +!> 2004-12-30 | H Chuang | Update to calculate total column for other hydrometeors +!> 2014-11-12 | Sarah Lu | Update tp calculate aerosol optical depth +!> 2015-07-02 | Sarah Lu | Update to calculate scattering aerosal optical depth (18) +!> 2015-07-04 | Sarah Lu | Correct PW integration for AOD (17) +!> 2015-07-10 | Sarah Lu | Update to calculate asymetry parameter +!> 2019-07-25 | Li(Kate) Zhang | Merge Sarah Lu's update for FV3-Chem +!> 2020-11-10 | Jesse Meng | Use UPP_PHYSICS Module +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1992-12-24 SUBROUTINE CALPW(PW,IDECID) ! diff --git a/sorc/ncep_post.fd/CALRAD_WCLOUD_newcrtm.f b/sorc/ncep_post.fd/CALRAD_WCLOUD_newcrtm.f index 62d3d85bc..4a7c19e3d 100644 --- a/sorc/ncep_post.fd/CALRAD_WCLOUD_newcrtm.f +++ b/sorc/ncep_post.fd/CALRAD_WCLOUD_newcrtm.f @@ -1,31 +1,23 @@ !> @file -! -!> THIS ROUTINE COMPUTES MODEL DERIVED BRIGHTNESS TEMPERATURE -!! USING CRTM. IT IS PATTERNED AFTER GSI SETUPRAD WITH TREADON'S HELP -!! -!! PROGRAM HISTORY LOG: -!! - 11-02-06 Jun WANG - addgrib2 option -!! - 14-12-09 WM LEWIS ADDED: -!! FUNCTION EFFR TO COMPUTE EFFECTIVE PARTICLE RADII -!! CHANNEL SELECTION USING LVLS FROM WRF_CNTRL.PARM -!! - 19-04-01 Sharon NEBUDA - Added output option for GOES-16 & GOES-17 ABI IR Channels 7-16 -!! - 20-04-09 Tracy Hertneky - Added Himawari-8 AHI CH7-CH16 -!! - 21-01-10 Web Meng - Added checking points for skiping grids with filling value spval -!! - 21-03-11 Bo Cui - improve local arrays memory -!! - 21-08-31 Lin Zhu - added ssmis-f17 channels 15-18 grib2 output -!! - 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! -!! LIBRARY: -!! /nwprod/lib/sorc/crtm2 -!! -!! @author CHUANG @date 07-01-17 -!! +!> @brief Subroutine that computes model derived brightness temperature. +!> +!> This routine computes model derived brightness temperature +!> using CRTM. It is patterned after GSI setuprad with Treadon's help. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2007-01-17 | H Chuang | Initial +!> 2011-02-06 | Jun Wang | add grib2 option +!> 2014-12-09 | WM Lewis | added function EFFR to compute effective particle radii channel selection using LVLS from WRF_CNTRL.PARM +!> 2019-04-01 | Sharon Nebuda | Added output option for GOES-16 & GOES-17 ABI IR Channels 7-16 +!> 2020-04-09 | Tracy Hertneky | Added Himawari-8 AHI CH7-CH16 +!> 2021-01-10 | Wen Meng | Added checking points for skiping grids with filling value spval +!> 2021-03-11 | Bo Cui | improve local arrays memory +!> 2021-08-31 | Lin Zhu | added ssmis-f17 channels 15-18 grib2 output +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Chuang @date 2007-01-17 SUBROUTINE CALRAD_WCLOUD use vrbls3d, only: o3, pint, pmid, t, q, qqw, qqi, qqr, f_rimef, nlice, nrain, qqs, qqg, & diff --git a/sorc/ncep_post.fd/CALRCH.f b/sorc/ncep_post.fd/CALRCH.f index e177112ac..b1b520aed 100644 --- a/sorc/ncep_post.fd/CALRCH.f +++ b/sorc/ncep_post.fd/CALRCH.f @@ -1,44 +1,26 @@ !> @file -! 
-!> SUBPROGRAM: CALRCH COMPUTES GRD RCH NUMBER -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-10-11 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE GRADIENT RICHARDSON NUMBER -!! AS CODED IN ETA MODEL SUBROUTINE PROFQ2.F. -!! FIX TO AVOID UNREASONABLY SMALL ANEMOMETER LEVEL WINDS. -!! -!! PROGRAM HISTORY LOG: -!! 93-10-11 RUSS TREADON -!! 98-06-17 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 01-10-22 H CHUANG - MODIFIED TO PROCESS HYBRID MODEL OUTPUT -!! 02-01-15 MIKE BALDWIN - WRF VERSION -!! 05-02-25 H CHUANG - ADD COMPUTATION FOR NMM E GRID -!! 05-07-07 BINBIN ZHOU - ADD RSM FOR A GRID -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALRCH(EL,RICHNO) -!! INPUT ARGUMENT LIST: -!! EL - MIXING LENGTH SCALE. -!! -!! OUTPUT ARGUMENT LIST: -!! RICHNO - GRADIENT RICHARDSON NUMBER. -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - -!! CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes GRD RCH number. +!> +!> This routine computes the gradient Richardson number +!> as coded in ETA model subroutine PROFQ2.F. +!> Fix to avoid unreasonably small anemometer level winds. +!> +!> @param[in] EL Mixing length scale. +!> @param[out] RICHNO Gradient Richardson number. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-10-11 | Russ Treadon | Initial +!> 1998-06-17 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2001-10-22 | H Chuang | Modified to process hybrid model output +!> 2002-01-15 | Mike Baldwin | WRF Version +!> 2005-02-25 | H Chuang | Add computation for NMM E grid +!> 2005-07-07 | Binbin Zhou | Add RSM for A Grid +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1993-10-11 SUBROUTINE CALRCH(EL,RICHNO) ! diff --git a/sorc/ncep_post.fd/CALSTRM.f b/sorc/ncep_post.fd/CALSTRM.f index c99390e52..adf7ac43e 100644 --- a/sorc/ncep_post.fd/CALSTRM.f +++ b/sorc/ncep_post.fd/CALSTRM.f @@ -1,44 +1,27 @@ !> @file -! -!> SUBPROGRAM: CALSTRM COMPUTES GEO STREAMFUNCTION -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-22 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE GEOSTROPHIC STREAMFUNCTION, -!! PSI, FROM THE PASSED GEOPOTENTIAL HEIGHT FIELD, Z. -!! THE FORMULA USED IS PSI = G*Z/F0, WHERE G IS THE -!! GRAVITATIONAL ACCELERATION CONSTANT AND F0 IS A -!! CONSTANT CORIOLIS PARAMETER. F0 IS SET TO BE THE -!! VALUE OF THE CORIOLIS PARAMETER NEAR THE CENTER -!! OF THE MODEL GRID. -!! -!! PROGRAM HISTORY LOG: -!! 92-12-22 RUSS TREADON -!! 98-06-08 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-05 JIM TUCCILLO - MPI VERSION -!! 02-06-13 MIKE BALDWIN - WRF VERSION -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALSTRM(Z1D,STRM) -!! INPUT ARGUMENT LIST: -!! Z1D - GEOPOTENTIAL HEIGHT (M) -!! -!! OUTPUT ARGUMENT LIST: -!! STRM - GEOSTROPHIC STREAMFUNCTION -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - MAPOT -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes geo streamfunction. +!> +!> This routine computes the geostrophic streamfunction, +!> PSI, from the passed geopotential height field, Z. +!> The formule used it PSI = G*Z/F0, where G is the +!> gravitational acceleration constant and F0 is a +!> constant Coriolis parameter. 
F0 is set to be the +!> valus of the Coriolis parameter near the center +!> of the model grid. +!> +!> @param[in] Z1D Geopotential height (m). +!> @param[out] STRM Geostrophic streamfunction. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1992-12-22 | Russ Treadon | Initial +!> 1998-06-08 | T Black | Conversion from 1-D TO 2-D +!> 2000-01-05 | Jim Tuccillo | MPI Version +!> 2002-06-13 | Mike Baldwin | WRF Version +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1992-12-22 SUBROUTINE CALSTRM(Z1D,STRM) ! diff --git a/sorc/ncep_post.fd/CALTAU.f b/sorc/ncep_post.fd/CALTAU.f index d9f36302c..08338039d 100644 --- a/sorc/ncep_post.fd/CALTAU.f +++ b/sorc/ncep_post.fd/CALTAU.f @@ -1,47 +1,30 @@ !> @file -! -!> SUBPROGRAM: CALTAU COMPUTE U AND V WIND STRESSES -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-09-01 -!! -!! ABSTRACT: THIS ROUTINE COMPUTES SURFACE LAYER U AND V -!! WIND COMPONENT STRESSES USING K THEORY AS PRESENTED -!! IN SECTION 8.4 OF "NUMBERICAL PREDICTION AND DYNAMIC -!! METEOROLOGY" BY HALTINER AND WILLIAMS (1980, JOHN WILEY -!! & SONS). -!! -!! PROGRAM HISTORY LOG: -!! 93-09-01 RUSS TREADON -!! 98-06-11 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 01-10-25 H CHUANG - MODIFIED TO PROCESS HYBRID OUTPUT -!! 02-01-15 MIKE BALDWIN - WRF VERSION, OUTPUT IS ON MASS-POINTS -!! 05-02-23 H CHUANG - COMPUTE STRESS FOR NMM ON WIND POINTS -!! 05-07-07 BINBIN ZHOU - ADD RSM STRESS for A GRID -!! 21-07-26 W Meng - Restrict computation from undefined grids -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! USAGE: CALL CALTAU(TAUX,TAUY) -!! INPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT ARGUMENT LIST: -!! TAUX - SUFACE LAYER U COMPONENT WIND STRESS. -!! TAUY - SUFACE LAYER V COMPONENT WIND STRESS. -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! CLMAX -!! MIXLEN -!! -!! LIBRARY: -!! COMMON - -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes U and V wind stresses. +!> +!> This routine computes surface layer U and V +!> wind component stresses using K theory as presented +!> in section 8.4 of "Numerical prediction and dynamic +!> meteorology" by Haltiner and Williams (1980, John Wiley +!> & Sons). +!> +!> @param[out] TAUX Suface layer U component wind stress. +!> @param[out] TAUY Suface layer V component wind stress. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-09-01 | Russ Treadon | Initial +!> 1998-06-11 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2001-10-25 | H Chuang | Modified to process hybrid output +!> 2002-01-15 | Mike Baldwin | WRF Version, output is on mass-points +!> 2005-02-23 | H Chuang | Compute stress for NMM on wind points +!> 2005-07-07 | Binbin Zhou | Add RSM stress for A Grid +!> 2021-07-26 | W Meng | Restrict computation from undefined grids +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1993-09-01 + SUBROUTINE CALTAU(TAUX,TAUY) ! diff --git a/sorc/ncep_post.fd/CALTHTE.f b/sorc/ncep_post.fd/CALTHTE.f index 96d1540b4..dae86a8a9 100644 --- a/sorc/ncep_post.fd/CALTHTE.f +++ b/sorc/ncep_post.fd/CALTHTE.f @@ -1,42 +1,26 @@ !> @file -! -!> SUBPROGRAM: CALTHTE COMPUTES THETA-E -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-06-18 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE EQUIVALENT POTENTIAL TEMPERATURE -!! 
GIVEN PRESSURE, TEMPERATURE, AND SPECIFIC HUMIDITY. THE -!! EQUATIONS OF BOLTON (MWR,1980) ARE USED. -!! -!! PROGRAM HISTORY LOG: -!! 93-06-18 RUSS TREADON -!! 98-06-16 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 21-07-28 W Meng - Restrict computation from undefined grids -!! 21-09-02 Bo Cui - Decompose UPP in X direction -!! -!! USAGE: CALL CALTHTE(P1D,T1D,Q1D,THTE) -!! INPUT ARGUMENT LIST: -!! P1D - PRESSURE (PA) -!! T1D - TEMPERATURE (K) -!! Q1D - SPECIFIC HUMIDITY (KG/KG) -!! -!! OUTPUT ARGUMENT LIST: -!! THTE - THETA-E (K) -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! VAPOR - FUNCTION TO CALCULATE VAPOR PRESSURE. -!! LIBRARY: -!! NONE -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes Theta-E. +!> +!> This routine computes the equivalent potential temperature +!> given pressure, temperature, and specific humidity. The +!> equations of Bolton (MWR,1980) are used. +!> +!> @param[in] P1D pressure (Pa). +!> @param[in] T1D temperature (K). +!> @param[in] Q1D specific humidity(kg/kg). +!> @param[out] THTE Theta-E (K). +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-06-18 | Russ Treadon | Initial +!> 1998-06-16 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2021-07-28 | W Meng | Restrict computation from undefined grids +!> 2021-09-02 | Bo Cui | Decompose UPP in X direction +!> +!> @author Russ Treadon W/NP2 @date 1993-06-18 + SUBROUTINE CALTHTE(P1D,T1D,Q1D,THTE) ! diff --git a/sorc/ncep_post.fd/CALUPDHEL.f b/sorc/ncep_post.fd/CALUPDHEL.f index ff9704506..17ee6b81c 100644 --- a/sorc/ncep_post.fd/CALUPDHEL.f +++ b/sorc/ncep_post.fd/CALUPDHEL.f @@ -1,39 +1,19 @@ !> @file -! -!> SUBPROGRAM: CALUPDHEL COMPUTES UPDRAFT HELICITY -!! PRGRMMR: PYLE ORG: W/NP2 DATE: 07-10-22 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE UPDRAFT HELICITY -!! -!! PROGRAM HISTORY LOG: -!! 07-10-22 M PYLE - based on SPC Algorithm courtesy of David Bright -!! 11-01-11 M Pyle - converted to F90 for unified post -!! 11-04-05 H Chuang - added B grid option -!! 20-11-06 J Meng - USE UPP_MATH MODULE -!! 21-10-31 J Meng - 2D DECOMPOSITION -!! -!! USAGE: CALL CALUPDHEL(UPDHEL) -!! -!! INPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT ARGUMENT LIST: -!! UPDHEL - UPDRAFT HELICITY (M^2/S^2) -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! +!> @brief Subroutine that computes the updraft helicity. +!> +!> @param[out] UPDHEL Updraft helicity (m^2/s^2). +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2007-10-22 | M Pyle | Initial +!> 2007-10-22 | M Pyle | based on SPC Algorithm courtesy of David Bright +!> 2011-01-11 | M Pyle | converted to F90 for unified post +!> 2011-04-05 | H Chuang | added B grid option +!> 2020-11-06 | J Meng | Use UPP_MATH Module +!> 2021-10-31 | J Meng | 2D DECOMPOSITION +!> +!> @author M Pyle W/NP2 @date 2007-10-22 SUBROUTINE CALUPDHEL(UPDHEL) ! diff --git a/sorc/ncep_post.fd/CALWXT_BOURG.f b/sorc/ncep_post.fd/CALWXT_BOURG.f index 230b34de5..51fb0a3d0 100644 --- a/sorc/ncep_post.fd/CALWXT_BOURG.f +++ b/sorc/ncep_post.fd/CALWXT_BOURG.f @@ -1,69 +1,55 @@ !> @file -! -!> Subprogram: calwxt_bourg Calculate precipitation type (Bourgouin) -!! Prgmmr: Baldwin Org: np22 Date: 1999-07-06 -!! -!! 
Abstract: This routine computes precipitation type -!! using a decision tree approach that uses the so-called -!! "energy method" of Bourgouin of AES (Canada) 1992 -!! -!! Program history log: -!! 1999-07-06 M Baldwin -!! 1999-09-20 M Baldwin make more consistent with bourgouin (1992) -!! 2005-08-24 G Manikin added to wrf post -!! 2007-06-19 M Iredell mersenne twister, best practices -!! 2015-00-00 S Moorthi changed random number call and optimization and cleanup -!! 2021-10-31 J Meng 2D DECOMPOSITION -!! -!! Usage: call calwxt_bourg(im,jm,jsta_2l,jend_2u,jsta,jend,lm,lp1, & -!! & iseed,g,pthresh, & -!! & t,q,pmid,pint,lmh,prec,zint,ptype) -!! Input argument list: -!! im integer i dimension -!! jm integer j dimension -!! jsta_2l integer j dimension start point (including haloes) -!! jend_2u integer j dimension end point (including haloes) -!! jsta integer j dimension start point (excluding haloes) -!! jend integer j dimension end point (excluding haloes) -!! lm integer k dimension -!! lp1 integer k dimension plus 1 -!! iseed integer random number seed -!! g real gravity (m/s**2) -!! pthresh real precipitation threshold (m) -!! t real(im,jsta_2l:jend_2u,lm) mid layer temp (K) -!! q real(im,jsta_2l:jend_2u,lm) specific humidity (kg/kg) -!! pmid real(im,jsta_2l:jend_2u,lm) mid layer pressure (Pa) -!! pint real(im,jsta_2l:jend_2u,lp1) interface pressure (Pa) -!! lmh real(im,jsta_2l:jend_2u) max number of layers -!! prec real(im,jsta_2l:jend_2u) precipitation (m) -!! zint real(im,jsta_2l:jend_2u,lp1) interface height (m) -!! Output argument list: -!! ptype integer(im,jm) instantaneous weather type () -!! acts like a 4 bit binary -!! 1111 = rain/freezing rain/ice pellets/snow -!! where the one's digit is for snow -!! the two's digit is for ice pellets -!! the four's digit is for freezing rain -!! and the eight's digit is for rain -!! in other words... -!! ptype=1 snow -!! ptype=2 ice pellets/mix with ice pellets -!! ptype=4 freezing rain/mix with freezing rain -!! ptype=8 rain -!! -!! Modules used: -!! mersenne_twister pseudo-random number generator -!! -!! Subprograms called: -!! random_number pseudo-random number generator -!! -!! Attributes: -!! Language: Fortran 90 -!! -!! Remarks: vertical order of arrays must be layer 1 = top -!! and layer lmh = bottom -!! -!! +!> @brief Subroutine that calculate precipitation type (Bourgouin). +!> +!> This routine computes precipitation type. +!> using a decision tree approach that uses the so-called +!> "energy method" of Bourgouin of AES (Canada) 1992. +!> +!> @param[in] im integer i dimension. +!> @param[in] jm integer j dimension. +!> @param[in] jsta_2l integer j dimension start point (including haloes). +!> @param[in] jend_2u integer j dimension end point (including haloes). +!> @param[in] jsta integer j dimension start point (excluding haloes). +!> @param[in] jend integer j dimension end point (excluding haloes). +!> @param[in] lm integer k dimension. +!> @param[in] lp1 integer k dimension plus 1. +!> @param[in] iseed integer random number seed. +!> @param[in] g real gravity (m/s**2). +!> @param[in] pthresh real precipitation threshold (m). +!> @param[in] t real(im,jsta_2l:jend_2u,lm) mid layer temp (K). +!> @param[in] q real(im,jsta_2l:jend_2u,lm) specific humidity (kg/kg). +!> @param[in] pmid real(im,jsta_2l:jend_2u,lm) mid layer pressure (Pa). +!> @param[in] pint real(im,jsta_2l:jend_2u,lp1) interface pressure (Pa). +!> @param[in] lmh real(im,jsta_2l:jend_2u) max number of layers. +!> @param[in] prec real(im,jsta_2l:jend_2u) precipitation (m). 
+!> @param[in] zint real(im,jsta_2l:jend_2u,lp1) interface height (m).
+!> @param[out] ptype integer(im,jm) instantaneous weather type; acts like a 4-bit binary value, 1111 = rain/freezing rain/ice pellets/snow:
+!>
+!> - the one's digit is for snow
+!> - the two's digit is for ice pellets
+!> - the four's digit is for freezing rain
+!> - the eight's digit is for rain
+!>
+!> In other words:
+!> - ptype=1 snow
+!> - ptype=2 ice pellets/mix with ice pellets
+!> - ptype=4 freezing rain/mix with freezing rain
+!> - ptype=8 rain
+!>
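As an aside to the ptype documentation above, the sketch below is a minimal, standalone Fortran program (not part of the UPP sources; the loop and output formatting are purely illustrative) showing how a caller could interpret the 4-bit ptype value with the standard BTEST intrinsic:

```
program decode_ptype
  implicit none
  integer :: ptype
  character(len=64) :: label

  ! The one's, two's, four's, and eight's bits of ptype flag snow, ice
  ! pellets, freezing rain, and rain, respectively.
  do ptype = 1, 8
    label = ''
    if (btest(ptype, 0)) label = trim(label)//' snow'
    if (btest(ptype, 1)) label = trim(label)//' ice pellets'
    if (btest(ptype, 2)) label = trim(label)//' freezing rain'
    if (btest(ptype, 3)) label = trim(label)//' rain'
    if (len_trim(label) > 0) print '(a,i1,a,a)', 'ptype=', ptype, ' ->', trim(label)
  end do
end program decode_ptype
```

Using BTEST keeps the digit-to-category mapping explicit instead of relying on the reader to decode the magic numbers 1, 2, 4, and 8.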
+!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1999-07-06 | M Baldwin | Initial +!> 1999-09-20 | M Baldwin | make more consistent with bourgouin (1992) +!> 2005-08-24 | G Manikin | added to wrf post +!> 2007-06-19 | M Iredell | mersenne twister, best practices +!> 2015-??-?? | S Moorthi | changed random number call and optimization and cleanup +!> 2021-10-31 | J Meng | 2D DECOMPOSITION +!> +!> Remarks: vertical order of arrays must be layer 1 = top +!> and layer lmh = bottom +!> +!> @author M Baldwin np22 @date 1999-07-06 subroutine calwxt_bourg_post(im,ista_2l,iend_2u,ista,iend,jm,jsta_2l,jend_2u,jsta,jend,lm,lp1, & & iseed,g,pthresh, & diff --git a/sorc/ncep_post.fd/CLDRAD.f b/sorc/ncep_post.fd/CLDRAD.f index 422caf06a..eeb3e2c9b 100644 --- a/sorc/ncep_post.fd/CLDRAD.f +++ b/sorc/ncep_post.fd/CLDRAD.f @@ -1,99 +1,70 @@ !> @file -! . . . -!> SUBPROGRAM: CLDRAD POST SNDING/CLOUD/RADTN FIELDS -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-08-30 -!! -!! ABSTRACT: THIS ROUTINE COMPUTES/POSTS SOUNDING, CLOUD -!! RELATED, AND RADIATION FIELDS. UNDER THE HEADING OF -!! SOUNDING FIELDS FALL THE THREE ETA MODEL LIFTED INDICES, -!! CAPE, CIN, AND TOTAL COLUMN PRECIPITABLE WATER. -!! -!! THE THREE ETA MODEL LIFTED INDICES DIFFER ONLY IN THE -!! DEFINITION OF THE PARCEL TO LIFT. ONE LIFTS PARCELS FROM -!! THE LOWEST ABOVE GROUND ETA LAYER. ANOTHER LIFTS MEAN -!! PARCELS FROM ANY OF NBND BOUNDARY LAYERS (SEE SUBROUTINE -!! BNDLYR). THE FINAL TYPE OF LIFTED INDEX IS A BEST LIFTED -!! INDEX BASED ON THE NBND BOUNDARY LAYER LIFTED INDICES. -!! -!! TWO TYPES OF CAPE/CIN ARE AVAILABLE. ONE IS BASED ON PARCELS -!! IN THE LOWEST ETA LAYER ABOVE GROUND. THE OTHER IS BASED -!! ON A LAYER MEAN PARCEL IN THE N-TH BOUNDARY LAYER ABOVE -!! THE GROUND. SEE SUBROUTINE CALCAPE FOR DETAILS. -!! -!! THE CLOUD FRACTION AND LIQUID CLOUD WATER FIELDS ARE DIRECTLY -!! FROM THE MODEL WITH MINIMAL POST PROCESSING. THE LIQUID -!! CLOUD WATER, 3-D CLOUD FRACTION, AND TEMPERATURE TENDENCIES -!! DUE TO PRECIPITATION ARE NOT POSTED IN THIS ROUTINE. SEE -!! SUBROUTINE ETAFLD FOR THESE FIELDS. LIFTING CONDENSATION -!! LEVEL HEIGHT AND PRESSURE ARE COMPUTED AND POSTED IN -!! SUBROUTINE MISCLN. -!! -!! THE RADIATION FIELDS POSTED BY THIS ROUTINE ARE THOSE COMPUTED -!! DIRECTLY IN THE MODEL. -!! -!! PROGRAM HISTORY LOG: -!! 93-08-30 RUSS TREADON -!! 94-08-04 MICHAEL BALDWIN - ADDED OUTPUT OF INSTANTANEOUS SFC -!! FLUXES OF NET SW AND LW DOWN RADIATION -!! 97-04-25 MICHAEL BALDWIN - FIX PDS FOR PRECIPITABLE WATER -!! 97-04-29 GEOFF MANIKIN - MOVED CLOUD TOP TEMPS CALCULATION -!! TO THIS SUBROUTINE. CHANGED METHOD -!! OF DETERMINING WHERE CLOUD BASE AND -!! TOP ARE FOUND AND ADDED HEIGHT OPTION -!! FOR TOP AND BASE. -!! 98-04-29 GEOFF MANIKIN - CHANGED VALUE FOR CLOUD BASE/TOP PRESSURES -!! AND HEIGHTS FROM SPVAL TO -500 -!! 98-06-15 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 98-07-17 MIKE BALDWIN - REMOVED LABL84 -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 00-02-22 GEOFF MANIKIN - CHANGED VALUE FOR CLOUD BASE/TOP PRESSURES -!! AND HEIGHTS FROM SPVAL TO -500 (WAS NOT IN -!! PREVIOUS IBM VERSION) -!! 01-10-22 H CHUANG - MODIFIED TO PROCESS HYBRID MODEL OUTPUT -!! 02-01-15 MIKE BALDWIN - WRF VERSION -!! 05-01-06 H CHUANG - ADD VARIOUS CLOUD FIELDS -!! 05-07-07 BINBIN ZHOU - ADD RSM MODEL -!! 05-08-30 BINBIN ZHOU - ADD CEILING and FLIGHT CONDITION RESTRICTION -!! 10-09-09 GEOFF MANIKIN - REVISED CALL TO CALCAPE -!! 11-02-06 Jun Wang - ADD GRIB2 OPTION -!! 
11-12-14 SARAH LU - ADD AEROSOL OPTICAL PROPERTIES -!! 11-12-16 SARAH LU - ADD AEROSOL 2D DIAG FIELDS -!! 11-12-23 SARAH LU - CONSOLIDATE ALL GOCART FIELDS TO BLOCK 4 -!! 11-12-23 SARAH LU - ADD AOD AT ADDITIONAL CHANNELS -!! 12-04-03 Jun Wang - Add lftx and GFS convective cloud cover for grib2 -!! 13-05-06 Shrinivas Moorthi - Add cloud condensate to total precip water -!! 13-12-23 LU/Wang - READ AEROSOL OPTICAL PROPERTIES LUTS to compute dust aod, -!! non-dust aod, and use geos5 gocart LUTS -!! 15-??-?? S. Moorthi - threading, optimization, local dimension -!! 19-07-24 Li(Kate) Zhang Merge and update ARAH Lu's work from NGAC into FV3-Chem -!! 19-10-30 Bo CUI - Remove "GOTO" statement -!! 20-03-25 Jesse Meng - remove grib1 -!! 20-05-20 Jesse Meng - CALRH unification with NAM scheme -!! 20-11-10 Jesse Meng - USE UPP_PHYSICS MODULE -!! 21-02-08 Anning Cheng, read aod550, aod550_du/su/ss/oc/bc -!! directly from fv3gfs and output to grib2 by setting rdaod -!! 21-04-01 Jesse Meng - COMPUTATION ON DEFINED POINTS ONLY -!! -!! USAGE: CALL CLDRAD -!! INPUT ARGUMENT LIST: -!! -!! OUTPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - RQSTFLD -!! CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : IBM SP -!! +!> @brief Subroutine that post SNDING/CLOUD/RADTN fields. +!> +!> This routine computes/posts sounding cloud +!> related, and radiation fields. Under the heading of +!> sounding fields fall the three ETA model lifted indices, +!> CAPE, CIN, and total column precipitable water. +!> +!> The three ETA model lifted indices differ only in the +!> definition of the parcel to lift. One lifts parcels from +!> the lowest above ground ETA layer. Another lifts mean +!> parcels from any of NBND boundary layers (See subroutine +!> BNDLYR). The final type of lifted index is a best lifted +!> inden based on the NBND bouddary layer lifted indices. +!> +!> Two types of CAPE/CIN are available. One is based on parcels +!> in the lowest ETA layer above ground. The other is based +!> on a layer mean parcel in the N-th boundary layer above +!> the ground. See subroutine CALCAPE for details. +!> +!> The cloud fraction and liquid cloud water fields are directly +!> from the model with minimal post processing. The liquid +!> cloud water, 3-D cloud fraction, and temperature tendencies +!> due to precipotation are not posted in this routine. See +!> sunroutine ETAFLD for these fields. Lifting condensation +!> level height and pressure are computed and posted in +!> subroutine MISCLN. +!> +!> The radiation fields posted by this routine are those computed +!> directly in the model. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-08-30 | Russ Treadon | Initial +!> 1994-08-04 | Mike Baldwin | Added output of instantaneous SFC fluxes of net SW and LW down radiation +!> 1997-04-25 | Mike Baldwin | Fix PDS for precipitable water +!> 1997-04-29 | Geoff Manikin | Moved cloud top temps calculation to this subroutine. 
Changed method of determining where cloud base and top are found and added height option for top and base +!> 1998-04-29 | Geoff Manikin | Changed value for cloud base/top pressures and heights from SPVAL to -500 +!> 1998-06-15 | T Black | Conversion from 1-D to 2-D +!> 1998-07-17 | Mike Baldwin | Removed LABL84 +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2000-02-22 | Geoff Manikin | Changed value for cloud base/top pressures and heights from SPVAL to -500 (was not in previous IBM version) +!> 2001-10-22 | H Chuang | Modified to process hybrid model output +!> 2002-01-15 | Mike Baldwin | WRF version +!> 2005-01-06 | H Chuang | Add various cloud fields +!> 2005-07-07 | Binbin Zhou | Add RSM model +!> 2005-08-30 | Binbin Zhou | Add ceiling and flight condition restriction +!> 2010-09-09 | Geoff Manikin | Revised call to CALCAPE +!> 2011-02-06 | Jun Wang | Add GRIB2 Option +!> 2011-12-14 | Sarah Lu | Add Aerosol optical properties +!> 2011-12-16 | Sarah Lu | Add Aerosol 2D DIAG fields +!> 2011-12-23 | Sarah Lu | Consolidate all GOCART fields to BLOCK 4 +!> 2011-12-23 | Sarah Lu | Add AOD at additional channels +!> 2012-04-03 | Jun Wang | Add lftx and GFS convective cloud cover for grib2 +!> 2013-05-06 | Shrinivas Moorthi | Add cloud condensate to total precip water +!> 2013-12-23 | Lu/Wang | Read aerosol optical properties LUTS to compute dust aod, non-dust aod, and use geos5 gocart LUTS +!> 2015-??-?? | S. Moorthi | threading, optimization, local dimension +!> 2019-07-24 | Li(Kate) Zhang | Merge and update ARAH Lu's work from NGAC into FV3-Chem +!> 2019-10-30 | Bo Cui | Remove "GOTO" statement +!> 2020-03-25 | Jesse Meng | Remove grib1 +!> 2020-05-20 | Jesse Meng | CALRH unification with NAM scheme +!> 2020-11-10 | Jesse Meng | Use UPP_PHYSICS Module +!> 2021-02-08 | Anning Cheng | read aod550, aod550_du/su/ss/oc/bc directly from fv3gfs and output to grib2 by setting rdaod +!> 2021-04-01 | Jesse Meng | Computation on defined points only +!> +!> @author Russ Treadon W/NP2 @date 1993-08-30 SUBROUTINE CLDRAD ! @@ -107,18 +78,19 @@ SUBROUTINE CLDRAD HBOT, HBOTD, HBOTS, HTOP, HTOPD, HTOPS, FIS, PBLH, & PBOT, PBOTL, PBOTM, PBOTH, CNVCFR, PTOP, PTOPL, & PTOPM, PTOPH, TTOPL, TTOPM, TTOPH, PBLCFR, CLDWORK, & - ASWIN, AUVBIN, AUVBINC, ASWIN, ASWOUT,ALWOUT, ASWTOA,& + ASWIN, AUVBIN, AUVBINC, ASWOUT,ALWOUT, ASWTOA,& RLWTOA, CZMEAN, CZEN, RSWIN, ALWIN, ALWTOA, RLWIN, & SIGT4, RSWOUT, RADOT, RSWINC, ASWINC, ASWOUTC, & ASWTOAC, ALWOUTC, ASWTOAC, AVISBEAMSWIN, & - AVISDIFFSWIN, ASWINTOA, ASWINC, ASWTOAC, AIRBEAMSWIN,& + AVISDIFFSWIN, ASWINTOA, ASWTOAC, AIRBEAMSWIN,& AIRDIFFSWIN, DUSMASS, DUSMASS25, DUCMASS, DUCMASS25, & ALWINC, ALWTOAC, SWDDNI, SWDDIF, SWDNBC, SWDDNIC, & SWDDIFC, SWUPBC, LWDNBC, LWUPBC, SWUPT, & TAOD5502D, AERSSA2D, AERASY2D, MEAN_FRP, LWP, IWP, & AVGCPRATE, & DUSTCB,SSCB,BCCB,OCCB,SULFCB,DUSTPM,SSPM,aod550, & - du_aod550,ss_aod550,su_aod550,oc_aod550,bc_aod550 + du_aod550,ss_aod550,su_aod550,oc_aod550,bc_aod550, & + PWAT use masks, only: LMH, HTM use params_mod, only: TFRZ, D00, H99999, QCLDMIN, SMALL, D608, H1, ROG, & GI, RD, QCONV, ABSCOEFI, ABSCOEF, STBOL, PQ0, A2, & @@ -255,6 +227,7 @@ SUBROUTINE CLDRAD data INDX_EXT / 610, 611, 612, 613, 614 / data INDX_SCA / 651, 652, 653, 654, 655 / logical, parameter :: debugprint = .false. + logical :: Model_Pwat ! ! !************************************************************************* @@ -422,12 +395,29 @@ SUBROUTINE CLDRAD IF (IGET(080) > 0) THEN ! dong GRID1 = spval + Model_Pwat = .false. 
+ DO J=JSTA,JEND + DO I=ISTA,IEND + IF(ABS(PWAT(I,J)-SPVAL)>SMALL) THEN + Model_Pwat = .true. + exit + ENDIF + END DO + END DO + IF (Model_Pwat) THEN + DO J=JSTA,JEND + DO I=ISTA,IEND + GRID1(I,J) = PWAT(I,J) + END DO + END DO + ELSE CALL CALPW(GRID1(ista:iend,jsta:jend),1) DO J=JSTA,JEND DO I=ISTA,IEND IF(FIS(I,J) >= SPVAL) GRID1(I,J)=spval END DO END DO + ENDIF CALL BOUND(GRID1,D00,H99999) if(grib == "grib2" )then cfld = cfld + 1 @@ -5642,9 +5632,9 @@ SUBROUTINE CLDRAD END subroutine cb_cover(cbcov) -! Calculate CB coverage by using fuzzy logic -! Evaluate membership of val in a fuzzy set fuzzy. -! Assume f is in x-log scale +!> Calculate CB coverage by using fuzzy logic +!> Evaluate membership of val in a fuzzy set fuzzy. +!> Assume f is in x-log scale use ctlblk_mod, only: SPVAL,JSTA,JEND,IM,ISTA,IEND implicit none real, intent(inout) :: cbcov(ISTA:IEND,JSTA:JEND) diff --git a/sorc/ncep_post.fd/CMakeLists.txt b/sorc/ncep_post.fd/CMakeLists.txt index 3fab5c995..bce8c8361 100644 --- a/sorc/ncep_post.fd/CMakeLists.txt +++ b/sorc/ncep_post.fd/CMakeLists.txt @@ -148,8 +148,6 @@ list(APPEND EXE_SRC GFSPOSTSIG.F INITPOST.F INITPOST_GFS_NEMS_MPIIO.f - INITPOST_GFS_NETCDF.f - INITPOST_GFS_NETCDF_PARA.f INITPOST_NEMS.f INITPOST_NETCDF.f WRFPOST.f @@ -227,7 +225,7 @@ if(BUILD_POSTEXEC) target_link_libraries(${EXENAME} PRIVATE wrf_io::wrf_io) endif() - install(TARGETS ${EXENAME} RUNTIME DESTINATION bin) + install(TARGETS ${EXENAME} RUNTIME DESTINATION ${exec_dir}) endif() install(DIRECTORY ${module_dir} DESTINATION ${CMAKE_INSTALL_PREFIX}) @@ -235,6 +233,6 @@ install(DIRECTORY ${module_dir} DESTINATION ${CMAKE_INSTALL_PREFIX}) install( TARGETS ${LIBNAME} EXPORT ${PROJECT_NAME}Exports - RUNTIME DESTINATION bin + RUNTIME DESTINATION ${exec_dir} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) diff --git a/sorc/ncep_post.fd/COLLECT.f b/sorc/ncep_post.fd/COLLECT.f index bcc8fab57..fc1a56f8f 100644 --- a/sorc/ncep_post.fd/COLLECT.f +++ b/sorc/ncep_post.fd/COLLECT.f @@ -1,35 +1,17 @@ !> @file -! -!> SUBPROGRAM: COLLECT GATHERS FROM ALL MPI TASKS -!! PRGRMMR: TUCCILLO ORG: IBM -!! -!! ABSTRACT: -!! GATHER "A" FROM ALL MPI TASKS ONTO TASK 0 -!! -!! PROGRAM HISTORY LOG: -!! 00-01-06 TUCCILLO - ORIGINAL -!! -!! USAGE: CALL COLLECT(A) -!! INPUT ARGUMENT LIST: -!! A - ARRAY BEING GATHERED -!! -!! OUTPUT ARGUMENT LIST: -!! A - GATHERED ARRAY - ONLY VALID ON TASK 0 -!! -!! OUTPUT FILES: -!! STDOUT - RUN TIME STANDARD OUT. -!! -!! SUBPROGRAMS CALLED: -!! MPI_GATHERV -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK.comm -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : IBM RS/6000 SP -!! +!> @brief Subroutine that collect gathers from all MPI tasks. +!> +!> @param[in] A Array being gathered. +!> @param[out] A gathered array - only valid on task 0. +!> +!> Gather "A" from all MPI tasks onto task 0. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2000-01-06 | Jim Tuccillo | Initial +!> +!> @author Jim Tuccillo IBM @date 2000-01-06 SUBROUTINE COLLECT (A, B) diff --git a/sorc/ncep_post.fd/COLLECT_LOC.f b/sorc/ncep_post.fd/COLLECT_LOC.f index 0d8ce1ff7..1fd6ea850 100644 --- a/sorc/ncep_post.fd/COLLECT_LOC.f +++ b/sorc/ncep_post.fd/COLLECT_LOC.f @@ -1,35 +1,17 @@ !> @file -! -!> SUBPROGRAM: COLLECT GATHERS FROM ALL MPI TASKS -!! PRGRMMR: TUCCILLO ORG: IBM -!! -!! ABSTRACT: -!! GATHER "A" FROM ALL MPI TASKS ONTO TASK 0 -!! -!! PROGRAM HISTORY LOG: -!! 00-01-06 TUCCILLO - ORIGINAL -!! -!! USAGE: CALL COLLECT(A) -!! INPUT ARGUMENT LIST: -!! 
A - ARRAY BEING GATHERED -!! -!! OUTPUT ARGUMENT LIST: -!! A - GATHERED ARRAY - ONLY VALID ON TASK 0 -!! -!! OUTPUT FILES: -!! STDOUT - RUN TIME STANDARD OUT. -!! -!! SUBPROGRAMS CALLED: -!! MPI_GATHERV -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK.comm -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : IBM RS/6000 SP -!! +!> @brief Subroutine that collect gathers from all MPI tasks. +!> +!> @param[in] A Array being gathered. +!> @param[out] A gathered array - only valid on task 0. +!> +!> Gather "A" from all MPI tasks onto task 0. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2000-01-06 | Jim Tuccillo | Initial +!> +!> @author Jim Tuccillo IBM @date 2000-01-06 SUBROUTINE COLLECT_LOC ( A, B ) diff --git a/sorc/ncep_post.fd/DEALLOCATE.f b/sorc/ncep_post.fd/DEALLOCATE.f index ecefcfbb4..ada0ddf80 100644 --- a/sorc/ncep_post.fd/DEALLOCATE.f +++ b/sorc/ncep_post.fd/DEALLOCATE.f @@ -1,35 +1,16 @@ !> @file -! -!> SUBPROGRAM: MPI_FIRST SET UP MESSGAE PASSING INFO -!! PRGRMMR: TUCCILLO ORG: IBM -!! -!! ABSTRACT: -!! SETS UP MESSAGE PASSING INFO -!! -!! PROGRAM HISTORY LOG: -!! 00-01-06 TUCCILLO - ORIGINAL -!! 01-10-25 H CHUANG - MODIFIED TO PROCESS HYBRID MODEL OUTPUT -!! 02-06-19 MIKE BALDWIN - WRF VERSION -!! -!! USAGE: CALL MPI_FIRST -!! INPUT ARGUMENT LIST: -!! -!! OUTPUT ARGUMENT LIST: -!! -!! OUTPUT FILES: -!! STDOUT - RUN TIME STANDARD OUT. -!! -!! SUBPROGRAMS CALLED: -!! PARA_RANGE -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK.comm -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : IBM RS/6000 SP -!! +!> @brief MPI_FIRST set up message passing info. +!> +!> This routine sets up message passing info. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2000-01-06 | Jim Tuccillo | Initial +!> 2001-10-25 | H Chuang | Modified to process hybrid model output +!> 2002-06-19 | Mike Baldwin | WRF version +!> +!> @author Jim Tuccillo IBM @date 2000-01-06 SUBROUTINE DE_ALLOCATE ! @@ -387,6 +368,7 @@ SUBROUTINE DE_ALLOCATE deallocate(tedir) deallocate(twa) deallocate(fdnsst) + deallocate(pwat) ! GSD deallocate(rainc_bucket) deallocate(rainc_bucket1) diff --git a/sorc/ncep_post.fd/DEWPOINT.f b/sorc/ncep_post.fd/DEWPOINT.f index 3d6d2b20e..1b962871d 100644 --- a/sorc/ncep_post.fd/DEWPOINT.f +++ b/sorc/ncep_post.fd/DEWPOINT.f @@ -1,51 +1,46 @@ !> @file -! -!> SUBPROGRAM: DEWPOINT COMPUTES DEWPOINTS FROM VAPOR PRESSURE -!! PRGMMR: J TUCCILLO ORG: W/NP2 DATE: 90-05-19 -!! -!! ABSTRACT: COMPUTES THE DEWPOINTS FOR THE N VALUES -!! OF VAPOR PRESSURE IN ARRAY VP. -!! THE FORMULA: -!! -!! VP = 0.611 * (X**A) * EXP( (A+B)*(1-X) ) -!! -!! IS USED TO GET DEWPOINT TEMPERATURE T, WHERE -!! -!! X = T3/T, T3=TRIPLE PT TEMPERATURE, -!! VP=VAPOR PRESSURE IN CBS, 0.611=VP AT T3, -!! A=(SPEC. HT. OF WATER-CSUBP OF VAPOR)/GAS CONST OF VAPOR -!! AND -!! B=LATENT HEAT AT T3/(GAS CONST OF VAPOR TIMES T3). -!! -!! ON THE FIRST CALL, A TABLE TDP IS CONSTRUCTED GIVING -!! DEWPOINT AS A FUNCTION OF VAPOR PRESSURE. -!! -!! VALUES OF VP LESS THAN THE FIRST TABLE ENTRY -!! (RVP1 IN THE CODE) WILL BE GIVEN DEWPOINTS FOR -!! THAT BEGINNING VALUE. SIMILARLY , VP VALUES THAT -!! EXCEED THE MAXIMUM TABLE VALUE (RVP2 IN THE CODE) -!! WILL BE ASSIGNED DEWPOINTS FOR THAT MAXIMUM VALUE. -!! -!! THE VALUES 0.02 AND 8.0 FOR RVP1 AND RVP2 YIELD -!! DEWPOINTS OF 233.6K AND 314.7K,RESPECTIVELY. -!! -!! PROGRAM HISTORY LOG: -!! - 90-05-19 J TUCCILLO -!! - 93-05-12 R TREADON - EXPANDED TABLE SIZE AND RESET -!! 
RANGE OF PRESSURES COVERED BY -!! TABLE. -!! - 98-06-12 T BLACK - CONVERSION FROM 1-D TO 2-D -!! - 00-01-04 JIM TUCCILLO - MPI VERSION -!! - 21-07-26 W Meng - Restrict computation from undefined grids -!! - 21-10-15 JESSE MENG - 2D DECOMPOSITION -!! -!! USAGE: CALL DEWPOINT( VP, TD) -!! INPUT ARGUMENT LIST: -!! VP - ARRAY OF N VAPOR PRESSURES(CENTIBARS) -!! -!! OUTPUT ARGUMENT LIST: -!! TD - DEWPOINT IN DEGREES ABSOLUTE -!! +!> @brief Subroutine that computes dewpoints from vapor pressure. +!> +!> This routine is to computes the dewpoints for the N values +!> of vapor pressure in array VP. +!> The forumla: +!> +!> VP = 0.611 * (X**A) * EXP( (A+B)*(1-X) ) +!> +!> is used to get dewpoint temperature T, where +!> +!> X = T3/T, T3=Triple PT temperature, +!> VP=Vapor pressure in CBS, 0.611=VP at T3, +!> A=(Spec. HT. of WATER-CSUBP of vapor)/gas const of vapor +!> and +!> B=Latent heat at T3/(gas const of vapor times T3). +!> +!> on the first call, a table TDP is constructed giving +!> dewpoint as a function of vapor pressure. +!> +!> Values of VP less than the first table entry +!> (RVP1 in the code) will be given dewpoints for +!> that beginning valus. Similarly, VP vaules that +!> exceed the maximum table value (RVP2 in the code) +!> will be assigned dewpoints for that maximum value. +!> +!> The values 0.02 and 8.0 for RVP1 and RVP2 yield +!> dewpoints of 233.6K and 314.7K,respectively. +!> +!> @param[in] VP Array of N vapor pressures(centibars). +!> @param[out] TD Dewpoint in degrees absolute. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1990-05-19 | Jim Tuccillo | Initial +!> 1993-05-12 | R Treadon | Expanded table size and reset range of pressures covered by table. +!> 1998-06-12 | T Black | Conversion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2021-07-26 | W Meng | Restrict computation from undefined grids +!> 2021-10-31 | J Meng | 2D Decomposition +!> +!> @author Jim Tuccillo W/NP2 @date 1990-05-19 SUBROUTINE DEWPOINT( VP, TD) use ctlblk_mod, only: jsta, jend, im, spval, ista, iend diff --git a/sorc/ncep_post.fd/INITPOST_GFS_NETCDF.f b/sorc/ncep_post.fd/INITPOST_GFS_NETCDF.f deleted file mode 100644 index b61732212..000000000 --- a/sorc/ncep_post.fd/INITPOST_GFS_NETCDF.f +++ /dev/null @@ -1,2761 +0,0 @@ -!> @file -! . . . -!> SUBPROGRAM: INITPOST_NETCDF INITIALIZE POST FOR RUN -!! PRGRMMR: Hui-Ya Chuang DATE: 2016-03-04 -!! -!! ABSTRACT: THIS ROUTINE INITIALIZES CONSTANTS AND -!! VARIABLES AT THE START OF GFS MODEL OR POST -!! PROCESSOR RUN. -!! -!! REVISION HISTORY -!! 2017-08-11 H Chuang start from INITPOST_GFS_NEMS_MPIIO.f -!! 2021-03-11 Bo Cui change local arrays to dimension (im,jsta:jend) -!! -!! USAGE: CALL INITPOST_NETCDF -!! INPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK -!! LOOKUP -!! SOILDEPTH -!! -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! 
- SUBROUTINE INITPOST_GFS_NETCDF(ncid3d) - - - use netcdf - use vrbls4d, only: dust, SALT, SUSO, SOOT, WASO, PP25, PP10 - use vrbls3d, only: t, q, uh, vh, pmid, pint, alpint, dpres, zint, zmid, o3, & - qqr, qqs, cwm, qqi, qqw, omga, rhomid, q2, cfr, rlwtt, rswtt, tcucn, & - tcucns, train, el_pbl, exch_h, vdifftt, vdiffmois, dconvmois, nradtt, & - o3vdiff, o3prod, o3tndy, mwpv, unknown, vdiffzacce, zgdrag,cnvctummixing, & - vdiffmacce, mgdrag, cnvctvmmixing, ncnvctcfrac, cnvctumflx, cnvctdmflx, & - cnvctzgdrag, sconvmois, cnvctmgdrag, cnvctdetmflx, duwt, duem, dusd, dudp, & - wh, qqg, ref_10cm - use vrbls2d, only: f, pd, fis, pblh, ustar, z0, ths, qs, twbs, qwbs, avgcprate, & - cprate, avgprec, prec, lspa, sno, si, cldefi, th10, q10, tshltr, pshltr, & - tshltr, albase, avgalbedo, avgtcdc, czen, czmean, mxsnal, radot, sigt4, & - cfrach, cfracl, cfracm, avgcfrach, qshltr, avgcfracl, avgcfracm, cnvcfr, & - islope, cmc, grnflx, vegfrc, acfrcv, ncfrcv, acfrst, ncfrst, ssroff, & - bgroff, rlwin, rlwtoa, cldwork, alwin, alwout, alwtoa, rswin, rswinc, & - rswout, aswin, auvbin, auvbinc, aswout, aswtoa, sfcshx, sfclhx, subshx, & - snopcx, sfcux, sfcvx, sfcuxi, sfcvxi, sfcuvx, gtaux, gtauy, potevp, u10, v10, smstav, & - smstot, ivgtyp, isltyp, sfcevp, sfcexc, acsnow, acsnom, sst, thz0, qz0, & - uz0, vz0, ptop, htop, pbot, hbot, ptopl, pbotl, ttopl, ptopm, pbotm, ttopm, & - ptoph, pboth, pblcfr, ttoph, runoff, maxtshltr, mintshltr, maxrhshltr, & - minrhshltr, dzice, smcwlt, suntime, fieldcapa, htopd, hbotd, htops, hbots, & - cuppt, dusmass, ducmass, dusmass25, ducmass25, aswintoa,rel_vort_maxhy1, & - maxqshltr, minqshltr, acond, sr, u10h, v10h,refd_max, w_up_max, w_dn_max, & - up_heli_max,up_heli_min,up_heli_max03,up_heli_min03,rel_vort_max01,u10max, v10max, & - avgedir,avgecan,avgetrans,avgesnow,avgprec_cont,avgcprate_cont,rel_vort_max, & - avisbeamswin,avisdiffswin,airbeamswin,airdiffswin,refdm10c_max,wspd10max, & - alwoutc,alwtoac,aswoutc,aswtoac,alwinc,aswinc,avgpotevp,snoavg, & - ti,aod550,du_aod550,ss_aod550,su_aod550,oc_aod550,bc_aod550 - use soil, only: sldpth, sh2o, smc, stc - use masks, only: lmv, lmh, htm, vtm, gdlat, gdlon, dx, dy, hbm2, sm, sice - use physcons_post, only: grav => con_g, fv => con_fvirt, rgas => con_rd, & - eps => con_eps, epsm1 => con_epsm1 - use params_mod, only: erad, dtr, tfrz, h1, d608, rd, p1000, capa,pi - use lookup_mod, only: thl, plq, ptbl, ttbl, rdq, rdth, rdp, rdthe, pl, qs0, sqs, sthe, & - ttblq, rdpq, rdtheq, stheq, the0q, the0 - use ctlblk_mod, only: me, mpi_comm_comp, icnt, idsp, jsta, jend, ihrst, idat, sdat, ifhr, & - ifmin, filename, tprec, tclod, trdlw, trdsw, tsrfc, tmaxmin, td3d, restrt, sdat, & - jend_m, imin, imp_physics, dt, spval, pdtop, pt, qmin, nbin_du, nphs, dtq2, ardlw,& - ardsw, asrfc, avrain, avcnvc, theat, gdsdegr, spl, lsm, alsl, im, jm, im_jm, lm, & - jsta_2l, jend_2u, nsoil, lp1, icu_physics, ivegsrc, novegtype, nbin_ss, nbin_bc, & - nbin_oc, nbin_su, gocart_on, pt_tbl, hyb_sigp, filenameFlux, fileNameAER,rdaod - use gridspec_mod, only: maptype, gridtype, latstart, latlast, lonstart, lonlast, cenlon, & - dxval, dyval, truelat2, truelat1, psmapf, cenlat,lonstartv, lonlastv, cenlonv, & - latstartv, latlastv, cenlatv,latstart_r,latlast_r,lonstart_r,lonlast_r - use upp_physics, only: fpvsnew -!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - implicit none -! -! type(nemsio_gfile) :: nfile,ffile,rfile - integer,parameter :: nvar2d=48 -! character(nemsio_charkind) :: name2d(nvar2d) - integer :: nvar3d, numDims -! 
character(nemsio_charkind), allocatable :: name3din(:), name3dout(:) -! character(nemsio_charkind) :: varname,levtype -! -! INCLUDE/SET PARAMETERS. -! - INCLUDE "mpif.h" - -! integer,parameter:: MAXPTS=1000000 ! max im*jm points -! -! real,parameter:: con_g =9.80665e+0! gravity -! real,parameter:: con_rv =4.6150e+2 ! gas constant H2O -! real,parameter:: con_rd =2.8705e+2 ! gas constant air -! real,parameter:: con_fvirt =con_rv/con_rd-1. -! real,parameter:: con_eps =con_rd/con_rv -! real,parameter:: con_epsm1 =con_rd/con_rv-1 -! -! This version of INITPOST shows how to initialize, open, read from, and -! close a NetCDF dataset. In order to change it to read an internal (binary) -! dataset, do a global replacement of _ncd_ with _int_. - - real, parameter :: gravi = 1.0/grav - character(len=20) :: VarName, VcoordName - integer :: Status, fldsize, fldst, recn, recn_vvel - character startdate*19,SysDepInfo*80,cgar*1 - character startdate2(19)*4 -! -! NOTE: SOME INTEGER VARIABLES ARE READ INTO DUMMY ( A REAL ). THIS IS OK -! AS LONG AS REALS AND INTEGERS ARE THE SAME SIZE. -! -! ALSO, EXTRACT IS CALLED WITH DUMMY ( A REAL ) EVEN WHEN THE NUMBERS ARE -! INTEGERS - THIS IS OK AS LONG AS INTEGERS AND REALS ARE THE SAME SIZE. - LOGICAL RUNB,SINGLRST,SUBPOST,NEST,HYDRO,IOOMG,IOALL -! logical, parameter :: debugprint = .true., zerout = .false. - logical, parameter :: debugprint = .false., zerout = .false. - logical :: convert_rad_to_deg=.false. - CHARACTER*32 varcharval -! CHARACTER*40 CONTRL,FILALL,FILMST,FILTMP,FILTKE,FILUNV,FILCLD,FILRAD,FILSFC - CHARACTER*4 RESTHR - CHARACTER FNAME*255,ENVAR*50 - INTEGER IDATE(8),JDATE(8),JPDS(200),JGDS(200),KPDS(200),KGDS(200) -! LOGICAL*1 LB(IM,JM) -! -! INCLUDE COMMON BLOCKS. -! -! DECLARE VARIABLES. -! -! REAL fhour -! integer nfhour ! forecast hour from nems io file - integer fhzero !bucket - real dtp !physics time step - REAL RINC(5) - -! REAL FI(IM,JM,2) - REAL DUMMY(IM,JM) - -!jw - integer ii,jj,js,je,iyear,imn,iday,itmp,ioutcount,istatus, & - I,J,L,ll,k,kf,irtn,igdout,n,Index,nframe, & - nframed2,iunitd3d,ierr,idum,iret,nrec,idrt - integer ncid3d,ncid2d,varid,nhcas - real TSTART,TLMH,TSPH,ES,FACT,soilayert,soilayerb,zhour,dum, & - tvll,pmll,tv, tx1, tx2 - - character*20,allocatable :: recname(:) - integer, allocatable :: reclev(:), kmsk(:,:) - real, allocatable :: glat1d(:), glon1d(:), qstl(:) - real, allocatable :: wrk1(:,:), wrk2(:,:) - real, allocatable :: p2d(:,:), t2d(:,:), q2d(:,:), & - qs2d(:,:), cw2d(:,:), cfr2d(:,:) - real(kind=4),allocatable :: vcoord4(:,:,:) - real, dimension(lm+1) :: ak5, bk5 - real*8, allocatable :: pm2d(:,:), pi2d(:,:) - real, allocatable :: tmp(:) - real :: buf(im,jsta_2l:jend_2u) - real :: buf3d(im,jsta_2l:jend_2u,lm) - -! real buf(im,jsta_2l:jend_2u),bufsoil(im,nsoil,jsta_2l:jend_2u) & -! ,buf3d(im,jsta_2l:jend_2u,lm),buf3d2(im,lp1,jsta_2l:jend_2u) - - real LAT - integer isa, jsa, latghf, jtem, idvc, idsl, nvcoord, ip1, nn, npass - - integer, parameter :: npass2=5, npass3=30 - real, parameter :: third=1.0/3.0 - INTEGER, DIMENSION(2) :: ij4min, ij4max - REAL :: omgmin, omgmax - real, allocatable :: d2d(:,:), u2d(:,:), v2d(:,:), omga2d(:,:) - REAL, ALLOCATABLE :: ps2d(:,:),psx2d(:,:),psy2d(:,:) - real, allocatable :: div3d(:,:,:) - real(kind=4),allocatable :: vcrd(:,:) - real :: dum_const - -!*********************************************************************** -! START INIT HERE. -! - WRITE(6,*)'INITPOST: ENTER INITPOST_GFS_NETCDF' - WRITE(6,*)'me=',me, & - 'jsta_2l=',jsta_2l,'jend_2u=', & - jend_2u,'im=',im -! 
- isa = im / 2 - jsa = (jsta+jend) / 2 - -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=1,im - buf(i,j) = spval - enddo - enddo - - Status=nf90_get_att(ncid3d,nf90_global,'ak',ak5) - if(Status/=0)then - print*,'ak not found; assigning missing value' - ak5=spval - else - if(me==0)print*,'ak5= ',ak5 - end if - Status=nf90_get_att(ncid3d,nf90_global,'idrt',idrt) - if(Status/=0)then - print*,'idrt not in netcdf file,reading grid' - Status=nf90_get_att(ncid3d,nf90_global,'grid',varcharval) - if(Status/=0)then - print*,'idrt and grid not in netcdf file, set default to latlon' - idrt=0 - MAPTYPE=0 - else - if(trim(varcharval)=='rotated_latlon')then - MAPTYPE=207 - idrt=207 - Status=nf90_get_att(ncid3d,nf90_global,'cen_lon',dum_const) - if(Status/=0)then - print*,'cen_lon not found; assigning missing value' - cenlon=spval - else - if(dum_const<0.)then - cenlon=nint((dum_const+360.)*gdsdegr) - else - cenlon=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'cen_lat',dum_const) - if(Status/=0)then - print*,'cen_lat not found; assigning missing value' - cenlat=spval - else - cenlat=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon1',dum_const) - if(Status/=0)then - print*,'lonstart_r not found; assigning missing value' - lonstart_r=spval - else - if(dum_const<0.)then - lonstart_r=nint((dum_const+360.)*gdsdegr) - else - lonstart_r=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat1',dum_const) - if(Status/=0)then - print*,'latstart_r not found; assigning missing value' - latstart_r=spval - else - latstart_r=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon2',dum_const) - if(Status/=0)then - print*,'lonlast_r not found; assigning missing value' - lonlast_r=spval - else - if(dum_const<0.)then - lonlast_r=nint((dum_const+360.)*gdsdegr) - else - lonlast_r=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat2',dum_const) - if(Status/=0)then - print*,'latlast_r not found; assigning missing value' - latlast_r=spval - else - latlast_r=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'dlon',dum_const) - if(Status/=0)then - print*,'dlmd not found; assigning missing value' - dxval=spval - else - dxval=dum_const*gdsdegr - end if - Status=nf90_get_att(ncid3d,nf90_global,'dlat',dum_const) - if(Status/=0)then - print*,'dphd not found; assigning missing value' - dyval=spval - else - dyval=dum_const*gdsdegr - end if - - print*,'lonstart,latstart,cenlon,cenlat,dyval,dxval', & - lonstart,latstart,cenlon,cenlat,dyval,dxval - -! 
Jili Dong add support for regular lat lon (2019/03/22) start - else if(trim(varcharval)=='latlon')then - MAPTYPE=0 - idrt=0 - - Status=nf90_get_att(ncid3d,nf90_global,'lon1',dum_const) - if(Status/=0)then - print*,'lonstart not found; assigning missing value' - lonstart=spval - else - if(dum_const<0.)then - lonstart=nint((dum_const+360.)*gdsdegr) - else - lonstart=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat1',dum_const) - if(Status/=0)then - print*,'latstart not found; assigning missing value' - latstart=spval - else - latstart=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon2',dum_const) - if(Status/=0)then - print*,'lonlast not found; assigning missing value' - lonlast=spval - else - if(dum_const<0.)then - lonlast=nint((dum_const+360.)*gdsdegr) - else - lonlast=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat2',dum_const) - if(Status/=0)then - print*,'latlast not found; assigning missing value' - latlast=spval - else - latlast=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'dlon',dum_const) - if(Status/=0)then - print*,'dlmd not found; assigning missing value' - dxval=spval - else - dxval=dum_const*gdsdegr - end if - Status=nf90_get_att(ncid3d,nf90_global,'dlat',dum_const) - if(Status/=0)then - print*,'dphd not found; assigning missing value' - dyval=spval - else - dyval=dum_const*gdsdegr - end if - - print*,'lonstart,latstart,dyval,dxval', & - lonstart,lonlast,latstart,latlast,dyval,dxval - -! Jili Dong add support for regular lat lon (2019/03/22) end - - else if(trim(varcharval)=='gaussian')then - MAPTYPE=4 - idrt=4 - else ! setting default maptype - MAPTYPE=0 - idrt=0 - end if - end if !end reading grid - end if !end reading idrt - if(me==0)print*,'idrt MAPTYPE= ',idrt,MAPTYPE -! STEP 1. READ MODEL OUTPUT FILE -! -! -!*** -! -! LMH and LMV always = LM for sigma-type vert coord - -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i = 1, im - LMV(i,j) = lm - LMH(i,j) = lm - end do - end do - -! HTM VTM all 1 for sigma-type vert coord - -!$omp parallel do private(i,j,l) - do l = 1, lm - do j = jsta_2l, jend_2u - do i = 1, im - HTM (i,j,l) = 1.0 - VTM (i,j,l) = 1.0 - end do - end do - end do - - Status=nf90_get_att(ncid3d,nf90_global,'nhcas',nhcas) - if(Status/=0)then - print*,'nhcas not in netcdf file, set default to nonhydro' - nhcas=0 - end if - if(me==0)print*,'nhcas= ',nhcas - if (nhcas == 0 ) then !non-hydrostatic case - nrec=15 - allocate (recname(nrec)) - recname=[character(len=20) :: 'ugrd','vgrd','spfh','tmp','o3mr', & - 'presnh','dzdt', 'clwmr','dpres', & - 'delz','icmr','rwmr', & - 'snmr','grle','cld_amt'] - else - nrec=8 - allocate (recname(nrec)) - recname=[character(len=20) :: 'ugrd','vgrd','tmp','spfh','o3mr', & - 'hypres', 'clwmr','dpres'] - endif - -! write(0,*)'nrec=',nrec - !allocate(recname(nrec),reclevtyp(nrec),reclev(nrec)) - allocate(glat1d(jm),glon1d(im)) - allocate(vcoord4(lm+1,3,2)) - -! hardwire idate for now -! idate=(/2017,08,07,00,0,0,0,0/) -! get cycle start time - Status=nf90_inq_varid(ncid3d,'time',varid) - if(Status/=0)then - print*,'time not in netcdf file, stopping' - STOP 1 - else - Status=nf90_get_att(ncid3d,varid,'units',varcharval) - if(Status/=0)then - print*,'time unit not available' - else - print*,'time unit read from netcdf file= ',varcharval -! assume use hours as unit -! idate_loc=index(varcharval,'since')+6 - read(varcharval,101)idate(1),idate(2),idate(3),idate(4),idate(5) - end if -! 
Status=nf90_inquire_dimension(ncid3d,varid,len=ntimes) -! allocate(fhours(ntimes)) -! status = nf90_inq_varid(ncid3d,varid,fhours) -! Status=nf90_get_var(ncid3d,varid,nfhour,start=(/1/), & -! count=(/1/)) -! if(Status/=0)then -! print*,'forecast hour not in netcdf file, stopping' -! STOP 1 -! end if - end if - 101 format(T13,i4,1x,i2,1x,i2,1x,i2,1x,i2) - print*,'idate= ',idate(1:5) -! get longitude - Status=nf90_inq_varid(ncid3d,'grid_xt',varid) - Status=nf90_inquire_variable(ncid3d,varid,ndims = numDims) - if(debugprint)print*,'number of dim for gdlon ',numDims - if(numDims==1)then - Status=nf90_get_var(ncid3d,varid,glon1d) - do j=jsta,jend - do i=1,im - gdlon(i,j) = real(glon1d(i),kind=4) - end do - end do - lonstart = nint(glon1d(1)*gdsdegr) - lonlast = nint(glon1d(im)*gdsdegr) - dxval = nint(abs(glon1d(1)-glon1d(2))*gdsdegr) - else if(numDims==2)then - Status=nf90_get_var(ncid3d,varid,dummy) - if(maxval(abs(dummy))<2.0*pi)convert_rad_to_deg=.true. - if(convert_rad_to_deg)then - do j=jsta,jend - do i=1,im - gdlon(i,j) = real(dummy(i,j),kind=4)*180./pi - end do - end do - else - do j=jsta,jend - do i=1,im - gdlon(i,j) = real(dummy(i,j),kind=4) - end do - end do - end if - if(convert_rad_to_deg)then - lonstart = nint(dummy(1,1)*gdsdegr)*180./pi - lonlast = nint(dummy(im,jm)*gdsdegr)*180./pi - dxval = nint(abs(dummy(1,1)-dummy(2,1))*gdsdegr)*180./pi - else - lonstart = nint(dummy(1,1)*gdsdegr) - lonlast = nint(dummy(im,jm)*gdsdegr) - dxval = nint(abs(dummy(1,1)-dummy(2,1))*gdsdegr) - end if - -! Jili Dong add support for regular lat lon (2019/03/22) start - if (MAPTYPE == 0) then - if(lonstart<0.)then - lonstart=lonstart+360.*gdsdegr - end if - if(lonlast<0.)then - lonlast=lonlast+360.*gdsdegr - end if - end if -! Jili Dong add support for regular lat lon (2019/03/22) end - - end if - print*,'lonstart,lonlast,dxval ',lonstart,lonlast,dxval -! get latitude - Status=nf90_inq_varid(ncid3d,'grid_yt',varid) - Status=nf90_inquire_variable(ncid3d,varid,ndims = numDims) - if(debugprint)print*,'number of dim for gdlat ',numDims - if(numDims==1)then - Status=nf90_get_var(ncid3d,varid,glat1d) - do j=jsta,jend - do i=1,im - gdlat(i,j) = real(glat1d(j),kind=4) - end do - end do - latstart = nint(glat1d(1)*gdsdegr) - latlast = nint(glat1d(jm)*gdsdegr) - dyval = nint(abs(glat1d(1)-glat1d(2))*gdsdegr) - else if(numDims==2)then - Status=nf90_get_var(ncid3d,varid,dummy) - if(maxval(abs(dummy))1.0e5)print*,'bad dpres ',i,j,dpres(i,j,l) -!make sure delz is positive -! if(dpres(i,j,l)/=spval .and. t(i,j,l)/=spval .and. & -! q(i,j,l)/=spval .and. buf3d(i,j,l)/=spval)then -! pmid(i,j,l)=rgas*dpres(i,j,l)* & -! t(i,j,l)*(q(i,j,l)*fv+1.0)/grav/abs(buf3d(i,j,l)) -! else -! pmid(i,j,l)=spval -! end if -! dong add missing value - if (wh(i,j,l) < spval) then - omga(i,j,l)=(-1.)*wh(i,j,l)*dpres(i,j,l)/abs(buf3d(i,j,l)) - else - omga(i,j,l) = spval - end if -! 
if(t(i,j,l)>1000.)print*,'bad T ',t(i,j,l) - enddo - enddo - enddo - call read_netcdf_3d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,recname(11) & - ,lm,qqi(1,jsta_2l,1)) - call read_netcdf_3d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,recname(12) & - ,lm,qqr(1,jsta_2l,1)) - call read_netcdf_3d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,recname(13) & - ,lm,qqs(1,jsta_2l,1)) - call read_netcdf_3d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,recname(14) & - ,lm,qqg(1,jsta_2l,1)) - call read_netcdf_3d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,recname(15) & - ,lm,cfr(1,jsta_2l,1)) -! calculate CWM from FV3 output - do l=1,lm - do j=jsta,jend - do i=1,im - cwm(i,j,l)=qqg(i,j,l)+qqs(i,j,l)+qqr(i,j,l)+qqi(i,j,l)+qqw(i,j,l) - enddo - enddo - if(debugprint)print*,'sample l,t,q,u,v,w,= ',isa,jsa,l & - ,t(isa,jsa,l),q(isa,jsa,l),uh(isa,jsa,l),vh(isa,jsa,l) & - ,wh(isa,jsa,l) - if(debugprint)print*,'sample l cwm for FV3',l, & - cwm(isa,jsa,l) - end do -! max hourly updraft velocity -! VarName='upvvelmax' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,w_up_max) -! if(debugprint)print*,'sample ',VarName,' = ',w_up_max(isa,jsa) - -! max hourly downdraft velocity -! VarName='dnvvelmax' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,w_dn_max) -! if(debugprint)print*,'sample ',VarName,' = ',w_dn_max(isa,jsa) -! max hourly updraft helicity -! VarName='uhmax25' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,up_heli_max) -! if(debugprint)print*,'sample ',VarName,' = ',up_heli_max(isa,jsa) -! min hourly updraft helicity -! VarName='uhmin25' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,up_heli_min) -! if(debugprint)print*,'sample ',VarName,' = ',up_heli_min(isa,jsa) -! max hourly 0-3km updraft helicity -! VarName='uhmax03' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,up_heli_max03) -! if(debugprint)print*,'sample ',VarName,' = ',up_heli_max03(isa,jsa) -! min hourly 0-3km updraft helicity -! VarName='uhmin03' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,up_heli_min03) -! if(debugprint)print*,'sample ',VarName,' = ',up_heli_min03(isa,jsa) - -! max 0-1km relative vorticity max -! VarName='maxvort01' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rel_vort_max01) -! if(debugprint)print*,'sample ',VarName,' = ',rel_vort_max01(isa,jsa) -! max 0-2km relative vorticity max -! VarName='maxvort02' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rel_vort_max) -! if(debugprint)print*,'sample ',VarName,' =',rel_vort_max(isa,jsa) -! max hybrid lev 1 relative vorticity max -! VarName='maxvorthy1' -! call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rel_vort_maxhy1) -! if(debugprint)print*,'sample ',VarName,' =',rel_vort_maxhy1(isa,jsa) -! 
surface pressure - VarName='pressfc' - call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName & - ,pint(1,jsta_2l,lp1)) - do j=jsta,jend - do i=1,im -! if(pint(i,j,lp1)>1.0E6 .or. pint(1,jsta_2l,lp1)<50000.) & -! print*,'bad psfc ',i,j,pint(i,j,lp1) - end do - end do - if(debugprint)print*,'sample ',VarName,' =',pint(isa,jsa,lp1) - - pt = ak5(1) - - do j=jsta,jend - do i=1,im - pint(i,j,1)= pt - end do - end do - - do l=2,lp1 - do j=jsta,jend - do i=1,im - pint(i,j,l) = pint(i,j,l-1) + dpres(i,j,l-1) - enddo - enddo -! if (me == 0) print*,'sample model pint,pmid' ,ii,jj,l & -! ,pint(ii,jj,l),pmid(ii,jj,l) - end do - -!compute pmid from averaged two layer pint - do l=lm,1,-1 - do j=jsta,jend - do i=1,im - pmid(i,j,l) = 0.5*(pint(i,j,l)+pint(i,j,l+1)) - enddo - enddo - enddo - -! do l=lm,1,-1 -! do j=jsta,jend -! do i=1,im -! if(pint(i,j,l+1)/=spval .and. dpres(i,j,l)/=spval)then -! pint(i,j,l)=pint(i,j,l+1)-dpres(i,j,l) -! else -! pint(i,j,l)=spval -! end if -! end do -! end do -! print*,'sample pint= ',isa,jsa,l,pint(isa,jsa,l) -! end do - -! surface height from FV3 -! dong set missing value for zint -! zint=spval - VarName='hgtsfc' - call read_netcdf_2d_scatter(me,ncid3d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName & - ,zint(1,jsta_2l,lp1)) - if(debugprint)print*,'sample ',VarName,' =',zint(isa,jsa,lp1) - do j=jsta,jend - do i=1,im - if (zint(i,j,lp1) /= spval) then - fis(i,j) = zint(i,j,lp1) * grav - else - fis(i,j) = spval - endif - enddo - enddo - - do l=lm,1,-1 - do j=jsta,jend - do i=1,im - if(zint(i,j,l+1)/=spval .and. buf3d(i,j,l)/=spval)then -!make sure delz is positive - zint(i,j,l)=zint(i,j,l+1)+abs(buf3d(i,j,l)) -! if(zint(i,j,l)>1.0E6)print*,'bad H ',i,j,l,zint(i,j,l) - else - zint(i,j,l)=spval - end if - end do - end do - print*,'sample zint= ',isa,jsa,l,zint(isa,jsa,l) - end do - - do l=lp1,1,-1 - do j=jsta,jend - do i=1,im - alpint(i,j,l)=log(pint(i,j,l)) - end do - end do - end do - - do l=lm,1,-1 - do j=jsta,jend - do i=1,im - if(zint(i,j,l+1)/=spval .and. zint(i,j,l)/=spval & - .and. pmid(i,j,l)/=spval)then - zmid(i,j,l)=zint(i,j,l+1)+(zint(i,j,l)-zint(i,j,l+1))* & - (log(pmid(i,j,l))-alpint(i,j,l+1))/ & - (alpint(i,j,l)-alpint(i,j,l+1)) - if(zmid(i,j,l)>1.0E6)print*,'bad Hmid ',i,j,l,zmid(i,j,l) - else - zmid(i,j,l)=spval - endif - end do - end do - end do - - - pt = ak5(1) - -! else -! do l=2,lm -!!$omp parallel do private(i,j) -! do j=jsta,jend -! do i=1,im -! pint(i,j,l) = pint(i,j,l-1) + dpres(i,j,l-1) -! enddo -! enddo -! if (me == 0) print*,'sample pint,pmid' ,ii,jj,l,pint(ii,jj,l),pmid(ii,jj,l) -! end do -! endif -! - - deallocate (vcoord4) -!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -! - -! done with 3d file, close it for now - Status=nf90_close(ncid3d) - deallocate(recname) - -! open flux file - Status = nf90_open(trim(fileNameFlux),NF90_NOWRITE, ncid2d) - - if ( Status /= 0 ) then - print*,'error opening ',fileNameFlux, ' Status = ', Status - print*,'skip reading of flux file' - endif - -! IVEGSRC=1 for IGBP, 0 for USGS, 2 for UMD - VarName='IVEGSRC' - Status=nf90_get_att(ncid2d,nf90_global,'IVEGSRC',IVEGSRC) - if (Status /= 0) then - print*,VarName,' not found-Assigned 1 for IGBP as default' - IVEGSRC=1 - end if - if (me == 0) print*,'IVEGSRC= ',IVEGSRC - -! 
set novegtype based on vegetation classification - if(ivegsrc==2)then - novegtype=13 - else if(ivegsrc==1)then - novegtype=20 - else if(ivegsrc==0)then - novegtype=24 - end if - if (me == 0) print*,'novegtype= ',novegtype - - Status=nf90_get_att(ncid2d,nf90_global,'imp_physics',imp_physics) - if (Status /= 0) then - print*,VarName,' not found-Assigned 11 GFDL as default' - imp_physics=11 - end if - if (me == 0) print*,'MP_PHYSICS= ',imp_physics -! - Status=nf90_get_att(ncid2d,nf90_global,'fhzero',fhzero) - if (Status /= 0) then - print*,VarName,' not found-Assigned 3 hours as default' - fhzero=3 - end if - if (me == 0) print*,'fhzero= ',fhzero -! - Status=nf90_get_att(ncid2d,nf90_global,'dtp',dtp) - if (Status /= 0) then - print*,VarName,' not found-Assigned 90s as default' - dtp=90 - end if - if (me == 0) print*,'dtp= ',dtp -! Initializes constants for Ferrier microphysics - if(imp_physics==5 .or. imp_physics==85 .or. imp_physics==95) then - CALL MICROINIT(imp_physics) - end if - -! Chuang: zhour is when GFS empties bucket last so using this -! to compute buket will result in changing bucket with forecast time. -! set default bucket for now - -! call nemsio_getheadvar(ffile,'zhour',zhour,iret=iret) -! if(iret == 0) then -! tprec = 1.0*ifhr-zhour -! tclod = tprec -! trdlw = tprec -! trdsw = tprec -! tsrfc = tprec -! tmaxmin = tprec -! td3d = tprec -! print*,'tprec from flux file header= ',tprec -! else -! print*,'Error reading accumulation bucket from flux file', & -! 'header - will try to read from env variable FHZER' -! CALL GETENV('FHZER',ENVAR) -! read(ENVAR, '(I2)')idum -! tprec = idum*1.0 -! tclod = tprec -! trdlw = tprec -! trdsw = tprec -! tsrfc = tprec -! tmaxmin = tprec -! td3d = tprec -! print*,'TPREC from FHZER= ',tprec -! end if - - - tprec = float(fhzero) - if(ifhr>240)tprec=12. - tclod = tprec - trdlw = tprec - trdsw = tprec - tsrfc = tprec - tmaxmin = tprec - td3d = tprec - print*,'tprec = ',tprec - - -! start reading 2d netcdf file -! surface pressure -! VarName='pressfc' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName & -! ,pint(1,jsta_2l,lp1)) -! if(debugprint)print*,'sample ',VarName,' =',pint(isa,jsa,lp1) -! do l=lm,1,-1 -! do j=jsta,jend -! do i=1,im -! pint(i,j,l)=pint(i,j,l+1)-dpres(i,j,l) -! if(pint(i,j,l)>1.0E6)print*,'bad P ',i,j,l,pint(i,j,l) & -! ,pint(i,j,l+1),dpres(i,j,l) -! end do -! end do -! print*,'sample pint= ',isa,jsa,l,pint(isa,jsa,l) -! end do -! surface height from FV3 already multiplied by G -! VarName='orog' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,fis) -! if(debugprint)print*,'sample ',VarName,' =',fis(isa,jsa) -! do j=jsta,jend -! do i=1,im -! if (fis(i,j) /= spval) then -! zint(i,j,lp1) = fis(i,j) -! fis(i,j) = fis(i,j) * grav -! else -! zint(i,j,lp1) = spval -! fis(i,j) = spval -! endif -! enddo -! enddo - -! do l=lm,1,-1 -! do j=jsta,jend -! do i=1,im -! if(zint(i,j,l+1)/=spval .and. buf3d(i,j,l)/=spval)then -! zint(i,j,l)=zint(i,j,l+1)+buf3d(i,j,l) -! if(zint(i,j,l)>1.0E6)print*,'bad H ',i,j,l,zint(i,j,l) -! else -! zint(i,j,l)=spval -! end if -! end do -! end do -! print*,'sample zint= ',isa,jsa,l,zint(isa,jsa,l) -! end do - -! Per communication with Fanglin, P from model in not monotonic -! so compute P using ak and bk for now Sep. 2017 -! do l=lm,1,-1 -!!!$omp parallel do private(i,j) -! do j=jsta,jend -! do i=1,im -! pint(i,j,l) = ak5(lm+2-l) + bk5(lm+2-l)*pint(i,j,lp1) -! 
pmid(i,j,l) = 0.5*(pint(i,j,l)+pint(i,j,l+1)) ! for now - -! enddo -! enddo -! print*,'sample pint,pmid' & -! ,l,pint(isa,jsa,l),pmid(isa,jsa,l) -! enddo - -! allocate(wrk1(im,jsta:jend),wrk2(im,jsta:jend)) -! do j=jsta,jend -! do i=1,im -! pd(i,j) = spval ! GFS does not output PD -! pint(i,j,1) = PT -! alpint(i,j,lp1) = log(pint(i,j,lp1)) -! wrk1(i,j) = log(PMID(I,J,LM)) -! wrk2(i,j) = T(I,J,LM)*(Q(I,J,LM)*fv+1.0) -! FI(I,J,1) = FIS(I,J) & -! + wrk2(i,j)*rgas*(ALPINT(I,J,Lp1)-wrk1(i,j)) -! ZMID(I,J,LM) = FI(I,J,1) * gravi -! end do -! end do - -! SECOND, INTEGRATE HEIGHT HYDROSTATICLY, GFS integrate height on -! mid-layer - -! DO L=LM,2,-1 ! omit computing model top height -! ll = l - 1 -! do j = jsta, jend -! do i = 1, im -! alpint(i,j,l) = log(pint(i,j,l)) -! tvll = T(I,J,LL)*(Q(I,J,LL)*fv+1.0) -! pmll = log(PMID(I,J,LL)) - -! FI(I,J,2) = FI(I,J,1) + (0.5*rgas)*(wrk2(i,j)+tvll) & -! * (wrk1(i,j)-pmll) -! ZMID(I,J,LL) = FI(I,J,2) * gravi -! -! FACT = (ALPINT(I,J,L)-wrk1(i,j)) / (pmll-wrk1(i,j)) -! ZINT(I,J,L) = ZMID(I,J,L) +(ZMID(I,J,LL)-ZMID(I,J,L))*FACT -! FI(I,J,1) = FI(I,J,2) -! wrk1(i,J) = pmll -! wrk2(i,j) = tvll -! ENDDO -! ENDDO - -! print*,'L ZINT= ',l,zint(isa,jsa,l),ZMID(isa,jsa,l) -! ,'alpint=',ALPINT(ii,jj,l),'pmid=',LOG(PMID(Ii,Jj,L)), & -! 'pmid(l-1)=',LOG(PMID(Ii,Jj,L-1)),'zmd=',ZMID(Ii,Jj,L), & -! 'zmid(l-1)=',ZMID(Ii,Jj,L-1) -! ENDDO -! deallocate(wrk1,wrk2) - -! do l=lp1,2,-1 -! do j=jsta,jend -! do i=1,im -! alpint(i,j,l)=log(pint(i,j,l)) -! end do -! end do -! end do - -! do l=lm,2,-1 -! do j=jsta,jend -! do i=1,im -! zmid(i,j,l)=zint(i,j,l+1)+(zint(i,j,l)-zint(i,j,l+1))* & -! (log(pmid(i,j,l))-alpint(i,j,l+1))/ & -! (alpint(i,j,l)-alpint(i,j,l+1)) -! if(zmid(i,j,l)>1.0E6)print*,'bad Hmid ',i,j,l,zmid(i,j,l) -! end do -! end do -! end do - -! VarName='refl_10cm' -! do l=1,lm -! call read_netcdf_3d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName & -! ,lm,REF_10CM(1,jsta_2l,1)) -! if(debugprint)print*,'sample ',VarName,'isa,jsa,l =' & -! ,REF_10CM(isa,jsa,l),isa,jsa,l -! enddo -!Set REF_10CM as missning since gfs doesn't ouput it - do l=1,lm - do j=jsta,jend - do i=1,im - REF_10CM(i,j,l)=spval - enddo - enddo - enddo - - VarName='land' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sm) - if(debugprint)print*,'sample ',VarName,' =',sm(im/2,(jsta+jend)/2) - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= spval) sm(i,j) = 1.0 - sm(i,j) - enddo - enddo - -! sea ice mask - - VarName = 'icec' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sice) - if(debugprint)print*,'sample ',VarName,' = ',sice(isa,jsa) - -! where(sice /=spval .and. sice >=1.0)sm=0.0 !sea ice has sea -! mask=0 -! GFS flux files have land points with non-zero sea ice, per Iredell, -! these -! points have sea ice changed to zero, i.e., trust land mask more than -! sea ice -! where(sm/=spval .and. sm==0.0)sice=0.0 !specify sea ice=0 at land - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= spval .and. sm(i,j) == 0.0) sice(i,j) = 0.0 - enddo - enddo - - -! PBL height using nemsio - VarName = 'hpbl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pblh) - if(debugprint)print*,'sample ',VarName,' = ',pblh(isa,jsa) - -! 
frictional velocity using nemsio - VarName='fricv' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ustar) -! if(debugprint)print*,'sample ',VarName,' = ',ustar(isa,jsa) - -! roughness length using getgb - VarName='sfcr' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,z0) -! if(debugprint)print*,'sample ',VarName,' = ',z0(isa,jsa) - -! sfc exchange coeff - VarName='sfexc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,SFCEXC) - -! aerodynamic conductance - VarName='acond' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,acond) - if(debugprint)print*,'sample ',VarName,' = ',acond(isa,jsa) -! mid day avg albedo - VarName='albdo_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgalbedo) - if(debugprint)print*,'sample ',VarName,' = ',avgalbedo(isa,jsa) - do j=jsta,jend - do i=1,im - if (avgalbedo(i,j) /= spval) avgalbedo(i,j) = avgalbedo(i,j) * 0.01 - enddo - enddo - -! surface potential T using getgb - VarName='tmpsfc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ths) - -! where(ths/=spval)ths=ths*(p1000/pint(:,:,lp1))**CAPA ! convert to THS - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (ths(i,j) /= spval) then -! write(0,*)' i=',i,' j=',j,' ths=',ths(i,j),' pint=',pint(i,j,lp1) - ths(i,j) = ths(i,j) * (p1000/pint(i,j,lp1))**capa - endif - QS(i,j) = SPVAL ! GFS does not have surface specific humidity - twbs(i,j) = SPVAL ! GFS does not have inst sensible heat flux - qwbs(i,j) = SPVAL ! GFS does not have inst latent heat flux -!assign sst - if (sm(i,j) /= 0.0) then - sst(i,j) = ths(i,j) * (pint(i,j,lp1)/p1000)**capa - else - sst(i,j) = spval - endif - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',ths(isa,jsa) - - -! GFS does not have time step and physics time step, make up ones since they -! are not really used anyway -! NPHS=1. -! DT=90. -! DTQ2 = DT * NPHS !MEB need to get physics DT - DTQ2 = DTP !MEB need to get physics DT - NPHS=1 - DT = DTQ2/NPHS !MEB need to get DT - TSPH = 3600./DT - -! convective precip in m per physics time step using getgb -! read 3 hour bucket - VarName='cpratb_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgcprate) -! where(avgcprate /= spval)avgcprate=avgcprate*dtq2/1000. ! convert to m -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgcprate(i,j) /= spval) avgcprate(i,j) = avgcprate(i,j) * (dtq2*0.001) - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgcprate(isa,jsa) - -! print*,'maxval CPRATE: ', maxval(CPRATE) - -! read continuous bucket - VarName='cprat_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgcprate_cont) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgcprate_cont(i,j) /= spval) avgcprate_cont(i,j) = & - avgcprate_cont(i,j) * (dtq2*0.001) - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgcprate_cont(isa,jsa) - -! print*,'maxval CPRATE: ', maxval(CPRATE) - -! 
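The skin temperature read above is converted to a surface potential temperature with Poisson's equation, theta = T*(p0/p)**kappa with p0 = 1000 hPa, and over water the SST is recovered by applying the inverse factor, which simply returns the original skin temperature. A tiny worked sketch; the value of `capa` (R_d/c_p) and the sample state are illustrative.

```
program sfc_theta
  implicit none
  real, parameter :: p1000 = 100000.0     ! reference pressure, Pa
  real, parameter :: capa  = 0.2859       ! R_d/c_p (illustrative value)
  real :: tsfc, psfc, ths, sst

  tsfc = 288.0        ! skin temperature from 'tmpsfc', K
  psfc = 98500.0      ! surface interface pressure pint(i,j,lp1), Pa

  ths = tsfc * (p1000/psfc)**capa         ! potential temperature
  sst = ths  * (psfc/p1000)**capa         ! inverse factor recovers tsfc over water
  print *, 'theta_sfc =', ths, '   sst =', sst
end program sfc_theta
```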
precip rate in m per physics time step using getgb - VarName='prateb_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgprec) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if(avgprec(i,j) /= spval)avgprec(i,j)=avgprec(i,j)*(dtq2*0.001) - enddo - enddo - - if(debugprint)print*,'sample ',VarName,' = ',avgprec(isa,jsa) - -! prec = avgprec !set avg cprate to inst one to derive other fields - - VarName='prate_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgprec_cont) -! where(avgprec /= spval)avgprec=avgprec*dtq2/1000. ! convert to m -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgprec_cont(i,j) /=spval)avgprec_cont(i,j)=avgprec_cont(i,j) & - * (dtq2*0.001) - enddo - enddo - - if(debugprint)print*,'sample ',VarName,' = ',avgprec_cont(isa,jsa) -! precip rate in m per physics time step - VarName='tprcp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,prec) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (prec(i,j) /= spval) prec(i,j)=prec(i,j)* (dtq2*0.001) & - * 1000. / dtp - enddo - enddo - -! convective precip rate in m per physics time step - VarName='cnvprcp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,cprate) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (cprate(i,j) /= spval) then - cprate(i,j) = max(0.,cprate(i,j)) * (dtq2*0.001) & - * 1000. / dtp - else - cprate(i,j) = 0. - endif - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',cprate(isa,jsa) - -! GFS does not have accumulated total, gridscale, and convective precip, will use inst precip to derive in SURFCE.f - -! max hourly 1-km agl reflectivity -! VarName='refdmax' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,refd_max) -! if(debugprint)print*,'sample ',VarName,' = ',refd_max(isa,jsa) -! max hourly -10C reflectivity -! VarName='refdmax263k' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,refdm10c_max) -! if(debugprint)print*,'sample ',VarName,' = ',refdm10c_max(isa,jsa) - -! max hourly u comp of 10m agl wind -! VarName='u10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,u10max) -! if(debugprint)print*,'sample ',VarName,' = ',u10max(isa,jsa) -! max hourly v comp of 10m agl wind -! VarName='v10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,v10max) -! if(debugprint)print*,'sample ',VarName,' = ',v10max(isa,jsa) -! max hourly 10m agl wind speed -! VarName='spd10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,wspd10max) -! if(debugprint)print*,'sample ',VarName,' = ',wspd10max(isa,jsa) - - -! 2m T using nemsio - VarName='tmp2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,tshltr) - if(debugprint)print*,'sample ',VarName,' = ',tshltr(isa,jsa) - -! inst snow water eqivalent using nemsio - VarName='weasd' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sno) -! 
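The averaged rates above (`cpratb_ave`, `prateb_ave`, and so on) come in kg m-2 s-1, which is numerically mm of water per second, so multiplying by the physics step `dtq2` gives mm per step and the factor 0.001 converts that to metres, the unit SURFCE.f works with; the instantaneous `tprcp` and `cnvprcp` fields get the extra `*1000./dtp`, a combined factor of dtq2/dtp. A small arithmetic sketch of the averaged-rate conversion with made-up numbers.

```
program precip_units
  implicit none
  real :: rate_ave    ! averaged precip rate from the file, kg m-2 s-1 (= mm s-1)
  real :: dtq2        ! physics time step, s (from the 'dtp' attribute)
  real :: accum_m     ! precipitation per physics step, metres of water

  rate_ave = 2.5e-4   ! about 0.9 mm per hour, illustrative
  dtq2     = 90.0

  ! 1 kg m-2 of water is 1 mm depth, so rate*dtq2 is mm per step; *0.001 gives m
  accum_m = rate_ave * (dtq2 * 0.001)
  print *, 'precip per physics step =', accum_m, ' m'
end program precip_units
```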
mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j)==0.) sno(i,j) = spval - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',sno(isa,jsa) - -! ave snow cover - VarName='snowc_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,snoavg) -! snow cover is multipled by 100 in SURFCE before writing it out - do j=jsta,jend - do i=1,im - if (sm(i,j)==1.0 .and. sice(i,j)==0.) snoavg(i,j)=spval - if(snoavg(i,j)/=spval)snoavg(i,j)=snoavg(i,j)/100. - end do - end do - -! snow depth in mm using nemsio - VarName='snod' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,si) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j)==1.0 .and. sice(i,j)==0.) si(i,j)=spval - if (si(i,j) /= spval) si(i,j) = si(i,j) * 1000.0 - CLDEFI(i,j) = SPVAL ! GFS does not have convective cloud efficiency - lspa(i,j) = spval ! GFS does not have similated precip - TH10(i,j) = SPVAL ! GFS does not have 10 m theta - TH10(i,j) = SPVAL ! GFS does not have 10 m theta - Q10(i,j) = SPVAL ! GFS does not have 10 m humidity - ALBASE(i,j) = SPVAL ! GFS does not have snow free albedo - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',si(isa,jsa) - -! 2m T using nemsio - VarName='tmp2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,tshltr) - if(debugprint)print*,'sample ',VarName,' = ',tshltr(isa,jsa) - -! GFS does not have 2m pres, estimate it, also convert t to theta - Do j=jsta,jend - Do i=1,im - PSHLTR(I,J)=pint(I,J,lm+1)*EXP(-0.068283/tshltr(i,j)) - tshltr(i,j)= tshltr(i,j)*(p1000/PSHLTR(I,J))**CAPA ! convert to theta -! if (j == jm/2 .and. mod(i,50) == 0) -! + print*,'sample 2m T and P after scatter= ' -! + ,i,j,tshltr(i,j),pshltr(i,j) - end do - end do - -! 2m specific humidity using nemsio - VarName='spfh2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,qshltr) - if(debugprint)print*,'sample ',VarName,' = ',qshltr(isa,jsa) - -! mid day avg albedo in fraction using nemsio -! VarName='albdosfc' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgalbedo) -!! where(avgalbedo /= spval)avgalbedo=avgalbedo/100. ! convert to fraction -!!$omp parallel do private(i,j) -! do j=jsta,jend -! do i=1,im -! if (avgalbedo(i,j) /= spval) avgalbedo(i,j) = avgalbedo(i,j) * 0.01 -! enddo -! enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgalbedo(isa,jsa) - -! time averaged column cloud fractionusing nemsio - VarName='tcdc_aveclm' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgtcdc) -! where(avgtcdc /= spval)avgtcdc=avgtcdc/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgtcdc(i,j) /= spval) avgtcdc(i,j) = avgtcdc(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgtcdc(isa,jsa) - -! GFS probably does not use zenith angle -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - Czen(i,j) = spval - CZMEAN(i,j) = SPVAL - enddo - enddo - -! maximum snow albedo in fraction using nemsio - VarName='snoalb' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,mxsnal) -! where(mxsnal /= spval)mxsnal=mxsnal/100. ! 
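Because GFS output carries no 2 m pressure, it is estimated hydrostatically from the lowest interface pressure as p_2m = p_sfc * exp(-c/T_2m), where the constant 0.068283 is approximately g*(2 m)/R_d, and the 2 m temperature is then turned into a potential temperature against that estimated pressure. A sketch with illustrative numbers (same caveat on `capa` as above).

```
program shelter_pressure
  implicit none
  real, parameter :: p1000 = 100000.0
  real, parameter :: capa  = 0.2859       ! R_d/c_p (illustrative value)
  real :: psfc, t2m, pshltr, th2m

  psfc = 98500.0      ! lowest interface pressure pint(i,j,lm+1), Pa
  t2m  = 285.0        ! 2 m temperature from 'tmp2m', K

  ! 0.068283 ~ g*(2 m)/R_d: hydrostatic thinning across a 2 m layer
  pshltr = psfc * exp(-0.068283/t2m)
  th2m   = t2m * (p1000/pshltr)**capa
  print *, 'p_2m =', pshltr, '   theta_2m =', th2m
end program shelter_pressure
```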
convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (mxsnal(i,j) /= spval) mxsnal(i,j) = mxsnal(i,j) * 0.01 - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',mxsnal(isa,jsa) - -! GFS probably does not use sigt4, set it to sig*t^4 -!$omp parallel do private(i,j,tlmh) - Do j=jsta,jend - Do i=1,im - TLMH = T(I,J,LM) * T(I,J,LM) - Sigt4(i,j) = 5.67E-8 * TLMH * TLMH - End do - End do - -! TG is not used, skip it for now - -! GFS does not have inst cloud fraction for high, middle, and low cloud -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - cfrach(i,j) = spval - cfracl(i,j) = spval - cfracm(i,j) = spval - enddo - enddo - -! ave high cloud fraction using nemsio - VarName='tcdc_avehcl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgcfrach) -! where(avgcfrach /= spval)avgcfrach=avgcfrach/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgcfrach(i,j) /= spval) avgcfrach(i,j) = avgcfrach(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfrach(isa,jsa) - -! ave low cloud fraction using nemsio - VarName='tcdc_avelcl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgcfracl) -! where(avgcfracl /= spval)avgcfracl=avgcfracl/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgcfracl(i,j) /= spval) avgcfracl(i,j) = avgcfracl(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfracl(isa,jsa) - -! ave middle cloud fraction using nemsio - VarName='tcdc_avemcl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgcfracm) -! where(avgcfracm /= spval)avgcfracm=avgcfracm/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (avgcfracm(i,j) /= spval) avgcfracm(i,j) = avgcfracm(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfracm(isa,jsa) - -! inst convective cloud fraction using nemsio - VarName='tcdccnvcl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,cnvcfr) -! where(cnvcfr /= spval)cnvcfr=cnvcfr/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (cnvcfr(i,j) /= spval) cnvcfr (i,j)= cnvcfr(i,j) * 0.01 - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',cnvcfr(isa,jsa) - -! slope type using nemsio - VarName='sltyp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,buf) -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=1,im - if (buf(i,j) < spval) then - islope(i,j) = nint(buf(i,j)) - else - islope(i,j) = 0 - endif - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',islope(isa,jsa) - -! plant canopy sfc wtr in m - VarName='cnwat' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,cmc) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (cmc(i,j) /= spval) cmc(i,j) = cmc(i,j) * 0.001 - if (sm(i,j) /= 0.0) cmc(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',cmc(isa,jsa) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - grnflx(i,j) = spval ! GFS does not have inst ground heat flux - enddo - enddo - -! 
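GFS does not supply SIGT4, so the post fills it with sigma*T**4 of the lowest model layer, written as (T*T)*(T*T) to avoid a general exponentiation. A trivial sketch of that computation; the temperature is made up.

```
program sigma_t4
  implicit none
  real, parameter :: sigma = 5.67e-8   ! Stefan-Boltzmann constant, W m-2 K-4
  real :: tlm, tsq, sigt4

  tlm   = 290.0                ! lowest-layer temperature, K (illustrative)
  tsq   = tlm * tlm
  sigt4 = sigma * tsq * tsq    ! sigma * T**4 without a power call
  print *, 'sigma*T^4 =', sigt4, ' W m-2'
end program sigma_t4
```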
frozen precip fraction using nemsio - VarName='cpofp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sr) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if(sr(i,j) /= spval) then -!set range within (0,1) - sr(i,j)=min(1.,max(0.,sr(i,j))) - endif - enddo - enddo - -! sea ice skin temperature - VarName='tisfc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ti) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sice(i,j) == spval .or. sice(i,j) == 0.) ti(i,j)=spval - enddo - enddo - -! vegetation fraction in fraction. using nemsio - VarName='veg' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,vegfrc) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (vegfrc(i,j) /= spval) then - vegfrc(i,j) = vegfrc(i,j) * 0.01 - else - vegfrc(i,j) = 0.0 - endif - enddo - enddo -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) vegfrc(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',vegfrc(isa,jsa) - -! GFS doesn not yet output soil layer thickness, assign SLDPTH to be the same as nam - - SLDPTH(1) = 0.10 - SLDPTH(2) = 0.3 - SLDPTH(3) = 0.6 - SLDPTH(4) = 1.0 - -! liquid volumetric soil mpisture in fraction using nemsio - VarName='soill1' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sh2o(1,jsta_2l,1)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) sh2o(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,1) - - VarName='soill2' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sh2o(1,jsta_2l,2)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) sh2o(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,2) - - VarName='soill3' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sh2o(1,jsta_2l,3)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) sh2o(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,3) - - VarName='soill4' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sh2o(1,jsta_2l,4)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) sh2o(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,4) - -! volumetric soil moisture using nemsio - VarName='soilw1' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smc(1,jsta_2l,1)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smc(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,1) - - VarName='soilw2' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smc(1,jsta_2l,2)) -! 
mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smc(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,2) - - VarName='soilw3' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smc(1,jsta_2l,3)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smc(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,3) - - VarName='soilw4' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smc(1,jsta_2l,4)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smc(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,4) - -! soil temperature using nemsio - VarName='soilt1' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,stc(1,jsta_2l,1)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,1) = spval - !if (sm(i,j) /= 0.0) stc(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l','stc',' = ',1,stc(isa,jsa,1) - - VarName='soilt2' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,stc(1,jsta_2l,2)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,2) = spval - !if (sm(i,j) /= 0.0) stc(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,2) - - VarName='soilt3' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,stc(1,jsta_2l,3)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,3) = spval - !if (sm(i,j) /= 0.0) stc(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,3) - - VarName='soilt4' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,stc(1,jsta_2l,4)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,4) = spval - !if (sm(i,j) /= 0.0) stc(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,4) - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - acfrcv(i,j) = spval ! GFS does not output time averaged convective and strat cloud fraction, set acfrcv to spval, ncfrcv to 1 - ncfrcv(i,j) = 1.0 - acfrst(i,j) = spval ! GFS does not output time averaged cloud fraction, set acfrst to spval, ncfrst to 1 - ncfrst(i,j) = 1.0 - bgroff(i,j) = spval ! GFS does not have UNDERGROUND RUNOFF - rlwtoa(i,j) = spval ! GFS does not have inst model top outgoing longwave - enddo - enddo -! trdlw(i,j) = 6.0 - ardlw = 1.0 ! GFS incoming sfc longwave has been averaged over 6 hr bucket, set ARDLW to 1 - -! time averaged incoming sfc longwave - VarName='dlwrf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwin) - -! 
inst incoming sfc longwave - VarName='dlwrf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rlwin) - -! time averaged outgoing sfc longwave - VarName='ulwrf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwout) -! inst outgoing sfc longwave - VarName='ulwrf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,radot) - -! where(alwout /= spval) alwout=-alwout ! CLDRAD puts a minus sign before gribbing -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (alwout(i,j) /= spval) alwout(i,j) = -alwout(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,alwout(isa,jsa) - -! time averaged outgoing model top longwave using gfsio - VarName='ulwrf_avetoa' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwtoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,alwtoa(isa,jsa) - -! GFS incoming sfc longwave has been averaged, set ARDLW to 1 - ardsw=1.0 -! trdsw=6.0 - -! time averaged incoming sfc shortwave - VarName='dswrf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswin) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswin(isa,jsa) - -! inst incoming sfc shortwave - VarName='dswrf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rswin) - -! inst incoming clear sky sfc shortwave - VarName='csdlf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rswinc) - -! time averaged incoming sfc uv-b using getgb - VarName='duvb_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,auvbin) -! if(debugprint)print*,'sample l',VarName,' = ',1,auvbin(isa,jsa) - -! time averaged incoming sfc clear sky uv-b using getgb - VarName='cduvb_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,auvbinc) -! if(debugprint)print*,'sample l',VarName,' = ',1,auvbinc(isa,jsa) - -! time averaged outgoing sfc shortwave using gfsio - VarName='uswrf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswout) -! where(aswout /= spval) aswout=-aswout ! CLDRAD puts a minus sign before gribbing -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (aswout(i,j) /= spval) aswout(i,j) = -aswout(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,aswout(isa,jsa) - -! inst outgoing sfc shortwave using gfsio - VarName='uswrf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,rswout) - -! time averaged model top incoming shortwave - VarName='dswrf_avetoa' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswintoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswintoa(isa,jsa) - -! time averaged model top outgoing shortwave - VarName='uswrf_avetoa' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswtoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswtoa(isa,jsa) - -! 
time averaged surface sensible heat flux, multiplied by -1 because wrf model flux -! has reversed sign convention using gfsio - VarName='shtfl_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcshx) -! where (sfcshx /= spval)sfcshx=-sfcshx -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sfcshx(i,j) /= spval) sfcshx(i,j) = -sfcshx(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcshx(isa,jsa) - -! inst surface sensible heat flux - VarName='shtfl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,twbs) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (twbs(i,j) /= spval) twbs(i,j) = -twbs(i,j) - enddo - enddo - -! GFS surface flux has been averaged, set ASRFC to 1 - asrfc=1.0 -! tsrfc=6.0 - -! time averaged surface latent heat flux, multiplied by -1 because wrf model flux -! has reversed sign vonvention using gfsio - VarName='lhtfl_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfclhx) -! where (sfclhx /= spval)sfclhx=-sfclhx -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sfclhx(i,j) /= spval) sfclhx(i,j) = -sfclhx(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,sfclhx(isa,jsa) - -! inst surface latent heat flux - VarName='lhtfl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,qwbs) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (qwbs(i,j) /= spval) qwbs(i,j) = -qwbs(i,j) - enddo - enddo - - if(me==0)print*,'rdaod= ',rdaod -! inst aod550 optical depth - if(rdaod) then - VarName='aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aod550) - - VarName='du_aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,du_aod550) - - VarName='ss_aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ss_aod550) - - VarName='su_aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,su_aod550) - - VarName='oc_aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,oc_aod550) - - VarName='bc_aod550' - call read_netcdf_2d_scatter(ncid2d,im,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,bc_aod550) - end if - -! time averaged ground heat flux using nemsio - VarName='gflux_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,subshx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) subshx(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,subshx(isa,jsa) - -! inst ground heat flux using nemsio - VarName='gflux' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,grnflx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) grnflx(i,j) = spval - enddo - enddo - -! time averaged zonal momentum flux using gfsio - VarName='uflx_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcux) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcux(isa,jsa) - -! 
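The surface sensible and latent heat fluxes above are negated wherever they are defined because the GFS sign convention is opposite to the one the post inherits from WRF; the `spval` test keeps missing points missing. A minimal sketch of that guarded sign flip with made-up values.

```
program flux_sign
  implicit none
  real, parameter :: spval = 9.9e10    ! missing-data flag (illustrative)
  real :: shtfl(3)
  integer :: i

  shtfl = (/ 45.0, -12.0, spval /)     ! W m-2, in the file's sign convention
  do i = 1, size(shtfl)
     if (shtfl(i) /= spval) shtfl(i) = -shtfl(i)   ! flip to the post convention
  end do
  print *, shtfl
end program flux_sign
```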
time averaged meridional momentum flux using nemsio - VarName='vflx_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcvx) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcvx(isa,jsa) - -! dong read in inst surface flux -! inst zonal momentum flux using gfsio -! VarName='uflx' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcuxi) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcuxi(isa,jsa) - -! inst meridional momentum flux using nemsio -! VarName='vflx' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcvxi) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcvxi(isa,jsa) - - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - sfcuvx(i,j) = spval ! GFS does not use total momentum flux - enddo - enddo - -! time averaged zonal gravity wave stress using nemsio - VarName='u-gwd_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,gtaux) -! if(debugprint)print*,'sample l',VarName,' = ',1,gtaux(isa,jsa) - -! time averaged meridional gravity wave stress using getgb - VarName='v-gwd_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,gtauy) -! if(debugprint)print*,'sample l',VarName,' = ',1,gtauy(isa,jsa) - -! time averaged accumulated potential evaporation - VarName='pevpr_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgpotevp) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) avgpotevp(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,potevp(isa,jsa) - -! inst potential evaporation - VarName='pevpr' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,potevp) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) potevp(i,j) = spval - enddo - enddo - - do l=1,lm -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im -! GFS does not have temperature tendency due to long wave radiation - rlwtt(i,j,l) = spval -! GFS does not have temperature tendency due to short wave radiation - rswtt(i,j,l) = spval -! GFS does not have temperature tendency due to latent heating from convection - tcucn(i,j,l) = spval - tcucns(i,j,l) = spval -! GFS does not have temperature tendency due to latent heating from grid scale - train(i,j,l) = spval - enddo - enddo - enddo - -! set avrain to 1 - avrain=1.0 - avcnvc=1.0 - theat=6.0 ! just in case GFS decides to output T tendency - -! 10 m u using nemsio - VarName='ugrd10m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,u10) - - do j=jsta,jend - do i=1,im - u10h(i,j)=u10(i,j) - end do - end do -! if(debugprint)print*,'sample l',VarName,' = ',1,u10(isa,jsa) - -! 10 m v using gfsio - VarName='vgrd10m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,v10) - - do j=jsta,jend - do i=1,im - v10h(i,j)=v10(i,j) - end do - end do -! if(debugprint)print*,'sample l',VarName,' = ',1,v10(isa,jsa) - -! 
vegetation type, it's in GFS surface file, hopefully will merge into gfsio soon - VarName='vtype' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,buf) -! where (buf /= spval) -! ivgtyp=nint(buf) -! elsewhere -! ivgtyp=0 !need to feed reasonable value to crtm -! end where -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=1,im - if (buf(i,j) < spval) then - ivgtyp(i,j) = nint(buf(i,j)) - else - ivgtyp(i,j) = 0 - endif - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,ivgtyp(isa,jsa) - -! soil type, it's in GFS surface file, hopefully will merge into gfsio soon - VarName='sotyp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,buf) - VcoordName='sfc' - l=1 -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=1,im - if (buf(i,j) < spval) then - isltyp(i,j) = nint(buf(i,j)) - else - isltyp(i,j) = 0 !need to feed reasonable value to crtm - endif - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,isltyp(isa,jsa) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - smstav(i,j) = spval ! GFS does not have soil moisture availability -! smstot(i,j) = spval ! GFS does not have total soil moisture - sfcevp(i,j) = spval ! GFS does not have accumulated surface evaporation - acsnow(i,j) = spval ! GFS does not have averaged accumulated snow - acsnom(i,j) = spval ! GFS does not have snow melt -! sst(i,j) = spval ! GFS does not have sst???? - thz0(i,j) = ths(i,j) ! GFS does not have THZ0, use THS to substitute - qz0(i,j) = spval ! GFS does not output humidity at roughness length - uz0(i,j) = spval ! GFS does not output u at roughness length - vz0(i,j) = spval ! GFS does not output humidity at roughness length - enddo - enddo - do l=1,lm -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - EL_PBL(i,j,l) = spval ! GFS does not have mixing length - exch_h(i,j,l) = spval ! GFS does not output exchange coefficient - enddo - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,thz0(isa,jsa) - -! retrieve inst convective cloud top, GFS has cloud top pressure instead of index, -! will need to modify CLDRAD.f to use pressure directly instead of index -! VarName='pres' -! VcoordName='convect-cld top' -! l=1 -! if(debugprint)print*,'sample l',VarName,' = ',1,ptop(isa,jsa) - VarName='prescnvclt' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ptop) - - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - htop(i,j) = spval - if(ptop(i,j) <= 0.0) ptop(i,j) = spval - enddo - enddo - do j=jsta,jend - do i=1,im - if(ptop(i,j) < spval)then - do l=1,lm - if(ptop(i,j) <= pmid(i,j,l))then - htop(i,j) = l -! if(i==ii .and. j==jj)print*,'sample ptop,pmid pmid-1,pint= ', & -! ptop(i,j),pmid(i,j,l),pmid(i,j,l-1),pint(i,j,l),htop(i,j) - exit - end if - end do - end if - end do - end do - -! retrieve inst convective cloud bottom, GFS has cloud top pressure instead of index, -! will need to modify CLDRAD.f to use pressure directly instead of index - VarName='prescnvclb' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pbot) -! if(debugprint)print*,'sample l',VarName,VcoordName,' = ',1,pbot(isa,jsa) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - hbot(i,j) = spval - if(pbot(i,j) <= 0.0) pbot(i,j) = spval - enddo - enddo - do j=jsta,jend - do i=1,im -! 
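Vegetation and soil type arrive as real-valued fields; where defined they are rounded to the nearest integer category, and missing points are set to 0 so CRTM still receives a usable value. A standalone sketch of that conversion; the categories and the `spval` value are illustrative.

```
program categorical_round
  implicit none
  real, parameter :: spval = 9.9e10
  real    :: buf(4)
  integer :: ivgtyp(4), i

  buf = (/ 7.0, 12.9999, 0.0, spval /)   ! real-valued categories from the file
  do i = 1, size(buf)
     if (buf(i) < spval) then
        ivgtyp(i) = nint(buf(i))         ! round to the nearest integer type
     else
        ivgtyp(i) = 0                    ! defined dummy category for missing points
     end if
  end do
  print *, ivgtyp
end program categorical_round
```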
if(.not.lb(i,j))print*,'false bitmask for pbot at ' -! + ,i,j,pbot(i,j) - if(pbot(i,j) < spval)then - do l=lm,1,-1 - if(pbot(i,j) >= pmid(i,j,l)) then - hbot(i,j) = l -! if(i==ii .and. j==jj)print*,'sample pbot,pmid= ', & -! pbot(i,j),pmid(i,j,l),hbot(i,j) - exit - end if - end do - end if - end do - end do - if(debugprint)print*,'sample hbot = ',hbot(isa,jsa) -! retrieve time averaged low cloud top pressure using nemsio - VarName='pres_avelct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ptopl) -! if(debugprint)print*,'sample l',VarName,' = ',1,ptopl(isa,jsa) - -! retrieve time averaged low cloud bottom pressure using nemsio - VarName='pres_avelcb' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pbotl) -! if(debugprint)print*,'sample l',VarName,' = ',1,pbotl(isa,jsa) - -! retrieve time averaged low cloud top temperature using nemsio - VarName='tmp_avelct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,Ttopl) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttopl(isa,jsa) - -! retrieve time averaged middle cloud top pressure using nemsio - VarName='pres_avemct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ptopm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,ptopm(isa,jsa) - -! retrieve time averaged middle cloud bottom pressure using nemsio - VarName='pres_avemcb' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pbotm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,pbotm(isa,jsa) - -! retrieve time averaged middle cloud top temperature using nemsio - VarName='tmp_avemct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,Ttopm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttopm(isa,jsa) - -! retrieve time averaged high cloud top pressure using nemsio ********* - VarName='pres_avehct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,ptoph) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,ptoph(isa,jsa) - -! retrieve time averaged high cloud bottom pressure using nemsio - VarName='pres_avehcb' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pboth) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,pboth(isa,jsa) - -! retrieve time averaged high cloud top temperature using nemsio - VarName='tmp_avehct' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,Ttoph) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttoph(isa,jsa) - -! retrieve boundary layer cloud cover using nemsio - VarName='tcdc_avebndcl' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,pblcfr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,pblcfr(isa,jsa) -! where (pblcfr /= spval)pblcfr=pblcfr/100. ! convert to fraction -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=1,im - if (pblcfr(i,j) < spval) pblcfr(i,j) = pblcfr(i,j) * 0.01 - enddo - enddo - -! 
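The convective cloud top and bottom are delivered as pressures, but downstream code expects model-level indices, so the searches above scan the column: for the top, downward from the model top until the first layer whose mid-layer pressure is at least the cloud-top pressure; for the bottom, upward from the surface until the first layer whose mid-layer pressure does not exceed the cloud-bottom pressure. A self-contained sketch of both searches on a made-up 10-layer column; the defaults and names are illustrative.

```
program cloud_level_index
  implicit none
  integer, parameter :: lm = 10
  real    :: pmid(lm), ptop, pbot
  integer :: l, htop, hbot

  do l = 1, lm                       ! made-up mid-layer pressures, top to bottom, Pa
     pmid(l) = 10000.0 + (l-1)*10000.0
  end do
  ptop = 43000.0                     ! convective cloud-top pressure
  pbot = 87000.0                     ! convective cloud-bottom pressure

  htop = lm
  do l = 1, lm                       ! downward: first layer at or below the top
     if (ptop <= pmid(l)) then
        htop = l
        exit
     end if
  end do

  hbot = 1
  do l = lm, 1, -1                   ! upward: first layer at or above the bottom
     if (pbot >= pmid(l)) then
        hbot = l
        exit
     end if
  end do
  print *, 'htop =', htop, '   hbot =', hbot   ! 5 and 8 for this column
end program cloud_level_index
```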
retrieve cloud work function - VarName='cwork_aveclm' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,cldwork) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,cldwork(isa,jsa) - -! accumulated total (base+surface) runoff - VarName='watr_acc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,runoff) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) runoff(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,runoff(isa,jsa) - -! retrieve shelter max temperature using nemsio - VarName='tmax_max2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,maxtshltr) - -! retrieve shelter min temperature using nemsio - VarName='tmin_min2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,mintshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', & -! 1,mintshltr(im/2,(jsta+jend)/2) - -! retrieve shelter max RH -! VarName='rh02max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,maxrhshltr) - -! retrieve shelter min temperature using nemsio -! VarName='rh02min' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,minrhshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', & -! 1,mintshltr(im/2,(jsta+jend)/2) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=1,im - MAXRHSHLTR(i,j) = SPVAL - MINRHSHLTR(i,j) = SPVAL - enddo - enddo - -! retrieve ice thickness using nemsio - VarName='icetk' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,dzice) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,dzice(isa,jsa) - -! retrieve wilting point using nemsio - VarName='wilt' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smcwlt) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smcwlt(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,smcwlt(isa,jsa) - -! retrieve sunshine duration using nemsio - VarName='sunsd_acc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,suntime) - -! retrieve field capacity using nemsio - VarName='fldcp' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,fieldcapa) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) fieldcapa(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,fieldcapa(isa,jsa) - -! retrieve time averaged surface visible beam downward solar flux - VarName='vbdsf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avisbeamswin) - VcoordName='sfc' - l=1 - -! retrieve time averaged surface visible diffuse downward solar flux - VarName='vddsf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avisdiffswin) - -! 
retrieve time averaged surface near IR beam downward solar flux - VarName='nbdsf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,airbeamswin) - -! retrieve time averaged surface near IR diffuse downward solar flux - VarName='nddsf_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,airdiffswin) - -! retrieve time averaged surface clear sky outgoing LW - VarName='csulf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwoutc) - -! retrieve time averaged TOA clear sky outgoing LW - VarName='csulftoa' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwtoac) - -! retrieve time averaged surface clear sky outgoing SW - VarName='csusf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswoutc) - -! retrieve time averaged TOA clear sky outgoing LW - VarName='csusftoa' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswtoac) - -! retrieve time averaged surface clear sky incoming LW - VarName='csdlf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,alwinc) - -! retrieve time averaged surface clear sky incoming SW - VarName='csdsf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,aswinc) - -! retrieve shelter max specific humidity using nemsio - VarName='spfhmax_max2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,maxqshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', -! 1,maxqshltr(isa,jsa) - -! retrieve shelter min temperature using nemsio - VarName='spfhmin_min2m' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,minqshltr) - -! retrieve storm runoff using nemsio - VarName='ssrun_acc' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,SSROFF) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) ssroff(i,j) = spval - enddo - enddo - -! retrieve direct soil evaporation - VarName='evbs_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgedir) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) avgedir(i,j) = spval - enddo - enddo - -! retrieve CANOPY WATER EVAP - VarName='evcw_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgecan) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) avgecan(i,j) = spval - enddo - enddo - -! retrieve PLANT TRANSPIRATION - VarName='trans_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgetrans) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) avgetrans(i,j) = spval - enddo - enddo - -! retrieve snow sublimation - VarName='sbsno_ave' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgesnow) -! 
mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j)==1.0 .and. sice(i,j)==0.) avgesnow(i,j)=spval - enddo - enddo - -! retrive total soil moisture - VarName='soilm' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,smstot) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) smstot(i,j) = spval - enddo - enddo - -! retrieve snow phase change heat flux - VarName='snohf' - call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & - ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,snopcx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - if (sm(i,j) /= 0.0) snopcx(i,j) = spval - enddo - enddo - -! GFS does not have deep convective cloud top and bottom fields - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=1,im - HTOPD(i,j) = SPVAL - HBOTD(i,j) = SPVAL - HTOPS(i,j) = SPVAL - HBOTS(i,j) = SPVAL - CUPPT(i,j) = SPVAL - enddo - enddo - -! done with flux file, close it for now - Status=nf90_close(ncid2d) -! deallocate(tmp,recname,reclevtyp,reclev) - -! pos east -! call collect_loc(gdlat,dummy) -! if(me == 0)then -! latstart = nint(dummy(1,1)*gdsdegr) -! latlast = nint(dummy(im,jm)*gdsdegr) -! print*,'laststart,latlast B bcast= ',latstart,latlast,'gdsdegr=',gdsdegr,& -! 'dummy(1,1)=',dummy(1,1),dummy(im,jm),'gdlat=',gdlat(1,1) -! end if -! call mpi_bcast(latstart,1,MPI_INTEGER,0,mpi_comm_comp,irtn) -! call mpi_bcast(latlast,1,MPI_INTEGER,0,mpi_comm_comp,irtn) -! write(6,*) 'laststart,latlast,me A calling bcast=',latstart,latlast,me -! call collect_loc(gdlon,dummy) -! if(me == 0)then -! lonstart = nint(dummy(1,1)*gdsdegr) -! lonlast = nint(dummy(im,jm)*gdsdegr) -! end if -! call mpi_bcast(lonstart,1,MPI_INTEGER,0,mpi_comm_comp,irtn) -! call mpi_bcast(lonlast, 1,MPI_INTEGER,0,mpi_comm_comp,irtn) - -! write(6,*)'lonstart,lonlast A calling bcast=',lonstart,lonlast -! - -! generate look up table for lifted parcel calculations - - THL = 210. - PLQ = 70000. - pt_TBL = 10000. ! this is for 100 hPa added by Moorthi - - CALL TABLE(PTBL,TTBL,PT_TBL, & - RDQ,RDTH,RDP,RDTHE,PL,THL,QS0,SQS,STHE,THE0) - - CALL TABLEQ(TTBLQ,RDPQ,RDTHEQ,PLQ,THL,STHEQ,THE0Q) - -! -! - IF(ME == 0)THEN - WRITE(6,*)' SPL (POSTED PRESSURE LEVELS) BELOW: ' - WRITE(6,51) (SPL(L),L=1,LSM) - 50 FORMAT(14(F4.1,1X)) - 51 FORMAT(8(F8.1,1X)) - ENDIF -! -!$omp parallel do private(l) - DO L = 1,LSM - ALSL(L) = LOG(SPL(L)) - END DO -! -!HC WRITE IGDS OUT FOR WEIGHTMAKER TO READ IN AS KGDSIN - if(me == 0)then - print*,'writing out igds' - igdout = 110 -! open(igdout,file='griddef.out',form='unformatted' -! + ,status='unknown') - if(maptype == 1)THEN ! Lambert conformal - WRITE(igdout)3 - WRITE(6,*)'igd(1)=',3 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)TRUELAT2 - WRITE(igdout)TRUELAT1 - WRITE(igdout)255 - ELSE IF(MAPTYPE == 2)THEN !Polar stereographic - WRITE(igdout)5 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)TRUELAT2 !Assume projection at +-90 - WRITE(igdout)TRUELAT1 - WRITE(igdout)255 - ! Note: The calculation of the map scale factor at the standard - ! lat/lon and the PSMAPF - ! 
Get map factor at 60 degrees (N or S) for PS projection, which will - ! be needed to correctly define the DX and DY values in the GRIB GDS - if (TRUELAT1 < 0.) THEN - LAT = -60. - else - LAT = 60. - end if - - CALL MSFPS (LAT,TRUELAT1*0.001,PSMAPF) - - ELSE IF(MAPTYPE == 3) THEN !Mercator - WRITE(igdout)1 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)latlast - WRITE(igdout)lonlast - WRITE(igdout)TRUELAT1 - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)255 - ELSE IF(MAPTYPE == 0 .OR. MAPTYPE == 203)THEN !A STAGGERED E-GRID - WRITE(igdout)203 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)136 - WRITE(igdout)CENLAT - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)64 - WRITE(igdout)0 - WRITE(igdout)0 - WRITE(igdout)0 - END IF - end if -! -! - - RETURN - END - diff --git a/sorc/ncep_post.fd/INITPOST_GFS_NETCDF_PARA.f b/sorc/ncep_post.fd/INITPOST_GFS_NETCDF_PARA.f deleted file mode 100644 index 888a26f31..000000000 --- a/sorc/ncep_post.fd/INITPOST_GFS_NETCDF_PARA.f +++ /dev/null @@ -1,2691 +0,0 @@ -!> @file -! . . . -!> SUBPROGRAM: INITPOST_GFS_NETCDF_PARA INITIALIZE POST FOR RUN -!! PRGRMMR: Wen Meng DATE: 2020-02-04 -!! -!! ABSTRACT: THIS ROUTINE INITIALIZES CONSTANTS AND -!! VARIABLES AT THE START OF GFS MODEL OR POST -!! PROCESSOR RUN. -!! -!! REVISION HISTORY -!! 2020-02-04 W Meng start from INITPOST_GFS_NETCDF.f -!! 2021-03-11 Bo Cui change local arrays to dimension (im,jsta:jend) -!! 2021-10-26 J Meng 2D DECOMPOSITION -!! -!! USAGE: CALL INITPOST_GFS_NETCDF_PARA -!! INPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT ARGUMENT LIST: -!! NONE -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK -!! LOOKUP -!! SOILDEPTH -!! -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : CRAY C-90 -!! 
- SUBROUTINE INITPOST_GFS_NETCDF_PARA(ncid3d) - - - use netcdf - use vrbls4d, only: dust, SALT, SUSO, SOOT, WASO, PP25, PP10 - use vrbls3d, only: t, q, uh, vh, pmid, pint, alpint, dpres, zint, zmid, o3, & - qqr, qqs, cwm, qqi, qqw, omga, rhomid, q2, cfr, rlwtt, rswtt, tcucn, & - tcucns, train, el_pbl, exch_h, vdifftt, vdiffmois, dconvmois, nradtt, & - o3vdiff, o3prod, o3tndy, mwpv, unknown, vdiffzacce, zgdrag,cnvctummixing, & - vdiffmacce, mgdrag, cnvctvmmixing, ncnvctcfrac, cnvctumflx, cnvctdmflx, & - cnvctzgdrag, sconvmois, cnvctmgdrag, cnvctdetmflx, duwt, duem, dusd, dudp, & - wh, qqg, ref_10cm - use vrbls2d, only: f, pd, fis, pblh, ustar, z0, ths, qs, twbs, qwbs, avgcprate, & - cprate, avgprec, prec, lspa, sno, si, cldefi, th10, q10, tshltr, pshltr, & - tshltr, albase, avgalbedo, avgtcdc, czen, czmean, mxsnal, landfrac, radot, sigt4, & - cfrach, cfracl, cfracm, avgcfrach, qshltr, avgcfracl, avgcfracm, cnvcfr, & - islope, cmc, grnflx, vegfrc, acfrcv, ncfrcv, acfrst, ncfrst, ssroff, & - bgroff, rlwin, rlwtoa, cldwork, alwin, alwout, alwtoa, rswin, rswinc, & - rswout, aswin, auvbin, auvbinc, aswout, aswtoa, sfcshx, sfclhx, subshx, & - snopcx, sfcux, sfcvx, sfcuxi, sfcvxi, sfcuvx, gtaux, gtauy, potevp, u10, v10, smstav, & - smstot, ivgtyp, isltyp, sfcevp, sfcexc, acsnow, acsnom, sst, thz0, qz0, & - uz0, vz0, ptop, htop, pbot, hbot, ptopl, pbotl, ttopl, ptopm, pbotm, ttopm, & - ptoph, pboth, pblcfr, ttoph, runoff, tecan, tetran, tedir, twa, maxtshltr, & - mintshltr, maxrhshltr, fdnsst, & - minrhshltr, dzice, smcwlt, suntime, fieldcapa, htopd, hbotd, htops, hbots, & - cuppt, dusmass, ducmass, dusmass25, ducmass25, aswintoa,rel_vort_maxhy1, & - maxqshltr, minqshltr, acond, sr, u10h, v10h,refd_max, w_up_max, w_dn_max, & - up_heli_max,up_heli_min,up_heli_max03,up_heli_min03,rel_vort_max01,u10max, v10max, & - avgedir,avgecan,paha,pahi,avgetrans,avgesnow,avgprec_cont,avgcprate_cont,rel_vort_max, & - avisbeamswin,avisdiffswin,airbeamswin,airdiffswin,refdm10c_max,wspd10max, & - alwoutc,alwtoac,aswoutc,aswtoac,alwinc,aswinc,avgpotevp,snoavg, & - ti,aod550,du_aod550,ss_aod550,su_aod550,oc_aod550,bc_aod550 - use soil, only: sldpth, sh2o, smc, stc - use masks, only: lmv, lmh, htm, vtm, gdlat, gdlon, dx, dy, hbm2, sm, sice - use physcons_post, only: grav => con_g, fv => con_fvirt, rgas => con_rd, & - eps => con_eps, epsm1 => con_epsm1 - use params_mod, only: erad, dtr, tfrz, h1, d608, rd, p1000, capa,pi - use lookup_mod, only: thl, plq, ptbl, ttbl, rdq, rdth, rdp, rdthe, pl, qs0, sqs, sthe, & - ttblq, rdpq, rdtheq, stheq, the0q, the0 - use ctlblk_mod, only: me, mpi_comm_comp, icnt, idsp, jsta, jend, ihrst, idat, sdat, ifhr, & - ifmin, filename, tprec, tclod, trdlw, trdsw, tsrfc, tmaxmin, td3d, restrt, sdat, & - jend_m, imin, imp_physics, dt, spval, pdtop, pt, qmin, nbin_du, nphs, dtq2, ardlw,& - ardsw, asrfc, avrain, avcnvc, theat, gdsdegr, spl, lsm, alsl, im, jm, im_jm, lm, & - jsta_2l, jend_2u, nsoil, lp1, icu_physics, ivegsrc, novegtype, nbin_ss, nbin_bc, & - nbin_oc, nbin_su, gocart_on, pt_tbl, hyb_sigp, filenameFlux, fileNameAER,rdaod, & - ista, iend, ista_2l, iend_2u,iend_m - use gridspec_mod, only: maptype, gridtype, latstart, latlast, lonstart, lonlast, cenlon, & - dxval, dyval, truelat2, truelat1, psmapf, cenlat,lonstartv, lonlastv, cenlonv, & - latstartv, latlastv, cenlatv,latstart_r,latlast_r,lonstart_r,lonlast_r - - use upp_physics, only: fpvsnew -!- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - implicit none -! -! 
type(nemsio_gfile) :: nfile,ffile,rfile - integer,parameter :: nvar2d=48 -! character(nemsio_charkind) :: name2d(nvar2d) - integer :: nvar3d, numDims -! character(nemsio_charkind), allocatable :: name3din(:), name3dout(:) -! character(nemsio_charkind) :: varname,levtype -! -! INCLUDE/SET PARAMETERS. -! - INCLUDE "mpif.h" - -! integer,parameter:: MAXPTS=1000000 ! max im*jm points -! -! real,parameter:: con_g =9.80665e+0! gravity -! real,parameter:: con_rv =4.6150e+2 ! gas constant H2O -! real,parameter:: con_rd =2.8705e+2 ! gas constant air -! real,parameter:: con_fvirt =con_rv/con_rd-1. -! real,parameter:: con_eps =con_rd/con_rv -! real,parameter:: con_epsm1 =con_rd/con_rv-1 -! -! This version of INITPOST shows how to initialize, open, read from, and -! close a NetCDF dataset. In order to change it to read an internal (binary) -! dataset, do a global replacement of _ncd_ with _int_. - - real, parameter :: gravi = 1.0/grav - character(len=20) :: VarName, VcoordName - integer :: Status, fldsize, fldst, recn, recn_vvel - character startdate*19,SysDepInfo*80,cgar*1 - character startdate2(19)*4 -! -! NOTE: SOME INTEGER VARIABLES ARE READ INTO DUMMY ( A REAL ). THIS IS OK -! AS LONG AS REALS AND INTEGERS ARE THE SAME SIZE. -! -! ALSO, EXTRACT IS CALLED WITH DUMMY ( A REAL ) EVEN WHEN THE NUMBERS ARE -! INTEGERS - THIS IS OK AS LONG AS INTEGERS AND REALS ARE THE SAME SIZE. - LOGICAL RUNB,SINGLRST,SUBPOST,NEST,HYDRO,IOOMG,IOALL -! logical, parameter :: debugprint = .true., zerout = .false. - logical, parameter :: debugprint = .false., zerout = .false. - logical :: convert_rad_to_deg=.false. - CHARACTER*32 varcharval -! CHARACTER*40 CONTRL,FILALL,FILMST,FILTMP,FILTKE,FILUNV,FILCLD,FILRAD,FILSFC - CHARACTER*4 RESTHR - CHARACTER FNAME*255,ENVAR*50 - INTEGER IDATE(8),JDATE(8),JPDS(200),JGDS(200),KPDS(200),KGDS(200) -! LOGICAL*1 LB(IM,JM) -! -! INCLUDE COMMON BLOCKS. -! -! DECLARE VARIABLES. -! -! REAL fhour - integer nfhour ! forecast hour from nems io file - integer fhzero !bucket - real dtp !physics time step - REAL RINC(5) - - REAL DUMMY(IM,JM) -!jw - integer ii,jj,js,je,iyear,imn,iday,itmp,ioutcount,istatus, & - I,J,L,ll,k,kf,irtn,igdout,n,Index,nframe, & - nframed2,iunitd3d,ierr,idum,iret,nrec,idrt - integer ncid3d,ncid2d,varid,nhcas - real TSTART,TLMH,TSPH,ES,FACT,soilayert,soilayerb,zhour,dum, & - tvll,pmll,tv, tx1, tx2 - - character*20,allocatable :: recname(:) - integer, allocatable :: reclev(:), kmsk(:,:) - real, allocatable :: glat1d(:), glon1d(:), qstl(:) - real, allocatable :: wrk1(:,:), wrk2(:,:) - real, allocatable :: p2d(:,:), t2d(:,:), q2d(:,:), & - qs2d(:,:), cw2d(:,:), cfr2d(:,:) - real(kind=4),allocatable :: vcoord4(:,:,:) - real, dimension(lm+1) :: ak5, bk5 - real*8, allocatable :: pm2d(:,:), pi2d(:,:) - real, allocatable :: tmp(:) - real :: buf(ista_2l:iend_2u,jsta_2l:jend_2u) - real :: buf3d(ista_2l:iend_2u,jsta_2l:jend_2u,lm) - -! real buf(im,jsta_2l:jend_2u),bufsoil(im,nsoil,jsta_2l:jend_2u) & -! ,buf3d(im,jsta_2l:jend_2u,lm),buf3d2(im,lp1,jsta_2l:jend_2u) - - real LAT - integer isa, jsa, latghf, jtem, idvc, idsl, nvcoord, ip1, nn, npass - - integer, parameter :: npass2=5, npass3=30 - real, parameter :: third=1.0/3.0 - INTEGER, DIMENSION(2) :: ij4min, ij4max - REAL :: omgmin, omgmax - real, allocatable :: d2d(:,:), u2d(:,:), v2d(:,:), omga2d(:,:) - REAL, ALLOCATABLE :: ps2d(:,:),psx2d(:,:),psy2d(:,:) - real, allocatable :: div3d(:,:,:) - real(kind=4),allocatable :: vcrd(:,:) - real :: dum_const - -!*********************************************************************** -! 
START INIT HERE. -! - WRITE(6,*)'INITPOST: ENTER INITPOST_GFS_NETCDF_PARA' - WRITE(6,*)'me=',me, & - 'jsta_2l=',jsta_2l,'jend_2u=', & - jend_2u,'im=',im, & - 'ista_2l=',ista_2l,'iend_2u=',iend_2u, & - 'ista=',ista,'iend=',iend, & - 'iend_m=',iend_m -! - isa = (ista+iend) / 2 - jsa = (jsta+jend) / 2 - -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=ista_2l, iend_2u - buf(i,j) = spval - enddo - enddo - - Status=nf90_get_att(ncid3d,nf90_global,'ak',ak5) - if(Status/=0)then - print*,'ak not found; assigning missing value' - ak5=spval - else - if(me==0)print*,'ak5= ',ak5 - end if - Status=nf90_get_att(ncid3d,nf90_global,'idrt',idrt) - if(Status/=0)then - print*,'idrt not in netcdf file,reading grid' - Status=nf90_get_att(ncid3d,nf90_global,'grid',varcharval) - if(Status/=0)then - print*,'idrt and grid not in netcdf file, set default to latlon' - idrt=0 - MAPTYPE=0 - else - if(trim(varcharval)=='rotated_latlon')then - MAPTYPE=207 - idrt=207 - Status=nf90_get_att(ncid3d,nf90_global,'cen_lon',dum_const) - if(Status/=0)then - print*,'cen_lon not found; assigning missing value' - cenlon=spval - else - if(dum_const<0.)then - cenlon=nint((dum_const+360.)*gdsdegr) - else - cenlon=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'cen_lat',dum_const) - if(Status/=0)then - print*,'cen_lat not found; assigning missing value' - cenlat=spval - else - cenlat=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon1',dum_const) - if(Status/=0)then - print*,'lonstart_r not found; assigning missing value' - lonstart_r=spval - else - if(dum_const<0.)then - lonstart_r=nint((dum_const+360.)*gdsdegr) - else - lonstart_r=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat1',dum_const) - if(Status/=0)then - print*,'latstart_r not found; assigning missing value' - latstart_r=spval - else - latstart_r=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon2',dum_const) - if(Status/=0)then - print*,'lonlast_r not found; assigning missing value' - lonlast_r=spval - else - if(dum_const<0.)then - lonlast_r=nint((dum_const+360.)*gdsdegr) - else - lonlast_r=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat2',dum_const) - if(Status/=0)then - print*,'latlast_r not found; assigning missing value' - latlast_r=spval - else - latlast_r=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'dlon',dum_const) - if(Status/=0)then - print*,'dlmd not found; assigning missing value' - dxval=spval - else - dxval=dum_const*gdsdegr - end if - Status=nf90_get_att(ncid3d,nf90_global,'dlat',dum_const) - if(Status/=0)then - print*,'dphd not found; assigning missing value' - dyval=spval - else - dyval=dum_const*gdsdegr - end if - - print*,'lonstart,latstart,cenlon,cenlat,dyval,dxval', & - lonstart,latstart,cenlon,cenlat,dyval,dxval - -! 
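! --- Illustrative sketch (not part of the UPP source). The grid attributes read
! --- above are kept in scaled units: degrees are multiplied by gdsdegr (1.e6 is
! --- an assumption here; the real value is set elsewhere in ctlblk_mod), and
! --- longitudes west of Greenwich are wrapped into 0-360 before scaling, as in
! --- the cen_lon/lon1/lon2 branches above. The attribute value is hypothetical.
      program scaled_lon_sketch
        implicit none
        real, parameter :: gdsdegr = 1.e6        ! assumed scaling factor
        real :: dum_const
        integer :: lonstart
        dum_const = -97.5                        ! hypothetical lon1 attribute (deg)
        if (dum_const < 0.) then
          lonstart = nint((dum_const + 360.) * gdsdegr)
        else
          lonstart = nint(dum_const * gdsdegr)
        end if
        print *, 'lonstart (scaled) = ', lonstart    ! 262500000
      end program scaled_lon_sketch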
Jili Dong add support for regular lat lon (2019/03/22) start - else if(trim(varcharval)=='latlon')then - MAPTYPE=0 - idrt=0 - - Status=nf90_get_att(ncid3d,nf90_global,'lon1',dum_const) - if(Status/=0)then - print*,'lonstart not found; assigning missing value' - lonstart=spval - else - if(dum_const<0.)then - lonstart=nint((dum_const+360.)*gdsdegr) - else - lonstart=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat1',dum_const) - if(Status/=0)then - print*,'latstart not found; assigning missing value' - latstart=spval - else - latstart=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'lon2',dum_const) - if(Status/=0)then - print*,'lonlast not found; assigning missing value' - lonlast=spval - else - if(dum_const<0.)then - lonlast=nint((dum_const+360.)*gdsdegr) - else - lonlast=dum_const*gdsdegr - end if - end if - Status=nf90_get_att(ncid3d,nf90_global,'lat2',dum_const) - if(Status/=0)then - print*,'latlast not found; assigning missing value' - latlast=spval - else - latlast=dum_const*gdsdegr - end if - - Status=nf90_get_att(ncid3d,nf90_global,'dlon',dum_const) - if(Status/=0)then - print*,'dlmd not found; assigning missing value' - dxval=spval - else - dxval=dum_const*gdsdegr - end if - Status=nf90_get_att(ncid3d,nf90_global,'dlat',dum_const) - if(Status/=0)then - print*,'dphd not found; assigning missing value' - dyval=spval - else - dyval=dum_const*gdsdegr - end if - - print*,'lonstart,latstart,dyval,dxval', & - lonstart,lonlast,latstart,latlast,dyval,dxval - -! Jili Dong add support for regular lat lon (2019/03/22) end - - else if(trim(varcharval)=='gaussian')then - MAPTYPE=4 - idrt=4 - else ! setting default maptype - MAPTYPE=0 - idrt=0 - end if - end if !end reading grid - end if !end reading idrt - if(me==0)print*,'idrt MAPTYPE= ',idrt,MAPTYPE -! STEP 1. READ MODEL OUTPUT FILE -! -! -!*** -! -! LMH and LMV always = LM for sigma-type vert coord - -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i = ista_2l, iend_2u - LMV(i,j) = lm - LMH(i,j) = lm - end do - end do - -! HTM VTM all 1 for sigma-type vert coord - -!$omp parallel do private(i,j,l) - do l = 1, lm - do j = jsta_2l, jend_2u - do i = ista_2l, iend_2u - HTM (i,j,l) = 1.0 - VTM (i,j,l) = 1.0 - end do - end do - end do - - Status=nf90_get_att(ncid3d,nf90_global,'nhcas',nhcas) - if(Status/=0)then - print*,'nhcas not in netcdf file, set default to nonhydro' - nhcas=0 - end if - if(me==0)print*,'nhcas= ',nhcas - if (nhcas == 0 ) then !non-hydrostatic case - nrec=15 - allocate (recname(nrec)) - recname=[character(len=20) :: 'ugrd','vgrd','spfh','tmp','o3mr', & - 'presnh','dzdt', 'clwmr','dpres', & - 'delz','icmr','rwmr', & - 'snmr','grle','cld_amt'] - else - nrec=8 - allocate (recname(nrec)) - recname=[character(len=20) :: 'ugrd','vgrd','tmp','spfh','o3mr', & - 'hypres', 'clwmr','dpres'] - endif - -! write(0,*)'nrec=',nrec - !allocate(recname(nrec),reclevtyp(nrec),reclev(nrec)) - allocate(glat1d(jm),glon1d(im)) - allocate(vcoord4(lm+1,3,2)) - -! hardwire idate for now -! idate=(/2017,08,07,00,0,0,0,0/) -! get cycle start time - Status=nf90_inq_varid(ncid3d,'time',varid) - if(Status/=0)then - print*,'time not in netcdf file, stopping' - STOP 1 - else - Status=nf90_get_att(ncid3d,varid,'units',varcharval) - if(Status/=0)then - print*,'time unit not available' - else - print*,'time unit read from netcdf file= ',varcharval -! assume use hours as unit -! idate_loc=index(varcharval,'since')+6 - read(varcharval,101)idate(1),idate(2),idate(3),idate(4),idate(5) - end if -! 
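! --- Illustrative sketch (not part of the UPP source). The cycle start time is
! --- recovered from the netCDF time:units attribute, assumed to look like
! --- "hours since YYYY-MM-DD HH:MM:SS"; the read(...,101) above tabs past
! --- "hours since " and picks the date fields out of the string (format 101 is
! --- defined just below). The units string here is a made-up example.
      program parse_time_units_sketch
        implicit none
        character(len=40) :: units
        integer :: idate(5)
        units = 'hours since 2021-03-22 06:00:00'   ! hypothetical attribute value
        read(units,101) idate                        ! year, month, day, hour, min
 101    format(T13,i4,1x,i2,1x,i2,1x,i2,1x,i2)
        print *, 'idate = ', idate                   ! 2021 3 22 6 0
      end program parse_time_units_sketch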
Status=nf90_inquire_dimension(ncid3d,varid,len=ntimes) -! allocate(fhours(ntimes)) -! status = nf90_inq_varid(ncid3d,varid,fhours) -! Status=nf90_get_var(ncid3d,varid,nfhour,start=(/1/), & -! count=(/1/)) -! if(Status/=0)then -! print*,'forecast hour not in netcdf file, stopping' -! STOP 1 -! end if - end if - 101 format(T13,i4,1x,i2,1x,i2,1x,i2,1x,i2) - print*,'idate= ',idate(1:5) -! get longitude - Status=nf90_inq_varid(ncid3d,'grid_xt',varid) - Status=nf90_inquire_variable(ncid3d,varid,ndims = numDims) - if(debugprint)print*,'number of dim for gdlon ',numDims - if(numDims==1)then - Status=nf90_get_var(ncid3d,varid,glon1d) - do j=jsta,jend - do i=ista,iend - gdlon(i,j) = real(glon1d(i),kind=4) - end do - end do - lonstart = nint(glon1d(1)*gdsdegr) - lonlast = nint(glon1d(im)*gdsdegr) - dxval = nint(abs(glon1d(1)-glon1d(2))*gdsdegr) - else if(numDims==2)then - Status=nf90_get_var(ncid3d,varid,dummy) - if(maxval(abs(dummy))<2.0*pi)convert_rad_to_deg=.true. - if(convert_rad_to_deg)then - do j=jsta,jend - do i=ista,iend - gdlon(i,j) = real(dummy(i,j),kind=4)*180./pi - end do - end do - else - do j=jsta,jend - do i=ista,iend - gdlon(i,j) = real(dummy(i,j),kind=4) - end do - end do - end if - if(convert_rad_to_deg)then - lonstart = nint(dummy(1,1)*gdsdegr)*180./pi - lonlast = nint(dummy(im,jm)*gdsdegr)*180./pi - dxval = nint(abs(dummy(1,1)-dummy(2,1))*gdsdegr)*180./pi - else - lonstart = nint(dummy(1,1)*gdsdegr) - lonlast = nint(dummy(im,jm)*gdsdegr) - dxval = nint(abs(dummy(1,1)-dummy(2,1))*gdsdegr) - end if - -! Jili Dong add support for regular lat lon (2019/03/22) start - if (MAPTYPE == 0) then - if(lonstart<0.)then - lonstart=lonstart+360.*gdsdegr - end if - if(lonlast<0.)then - lonlast=lonlast+360.*gdsdegr - end if - end if -! Jili Dong add support for regular lat lon (2019/03/22) end - - end if - print*,'lonstart,lonlast,dxval,me = ',lonstart,lonlast,dxval,me -! get latitude - Status=nf90_inq_varid(ncid3d,'grid_yt',varid) - Status=nf90_inquire_variable(ncid3d,varid,ndims = numDims) - if(debugprint)print*,'number of dim for gdlat ',numDims - if(numDims==1)then - Status=nf90_get_var(ncid3d,varid,glat1d) - do j=jsta,jend - do i=ista,iend - gdlat(i,j) = real(glat1d(j),kind=4) - end do - end do - latstart = nint(glat1d(1)*gdsdegr) - latlast = nint(glat1d(jm)*gdsdegr) - dyval = nint(abs(glat1d(1)-glat1d(2))*gdsdegr) - else if(numDims==2)then - Status=nf90_get_var(ncid3d,varid,dummy) - if(maxval(abs(dummy))1000.)print*,'bad T ',t(i,j,l) - enddo - enddo - enddo - call read_netcdf_3d_para(ncid3d,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,recname(11),qqi(ista_2l,jsta_2l,1),lm) - call read_netcdf_3d_para(ncid3d,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,recname(12),qqr(ista_2l,jsta_2l,1),lm) - call read_netcdf_3d_para(ncid3d,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,recname(13),qqs(ista_2l,jsta_2l,1),lm) - call read_netcdf_3d_para(ncid3d,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,recname(14),qqg(ista_2l,jsta_2l,1),lm) - call read_netcdf_3d_para(ncid3d,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,recname(15),cfr(ista_2l,jsta_2l,1),lm) - -! 
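! --- Illustrative sketch (not part of the UPP source). When grid_xt/grid_yt come
! --- back two-dimensional their units are not taken for granted: if every value
! --- is smaller in magnitude than 2*pi the coordinates are presumed to be
! --- radians and converted to degrees, which is the convert_rad_to_deg test used
! --- above. The coordinate values below are made up.
      program rad_or_deg_sketch
        implicit none
        real, parameter :: pi = 3.14159265
        real :: lon(4)
        logical :: convert_rad_to_deg
        lon = (/ -3.05, -1.52, 0.0, 1.52 /)          ! hypothetical coordinates
        convert_rad_to_deg = maxval(abs(lon)) < 2.0*pi
        if (convert_rad_to_deg) lon = lon * 180.0/pi
        print *, 'degrees: ', lon
      end program rad_or_deg_sketch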
calculate CWM from FV3 output - do l=1,lm - do j=jsta,jend - do i=ista,iend - cwm(i,j,l)=qqg(i,j,l)+qqs(i,j,l)+qqr(i,j,l)+qqi(i,j,l)+qqw(i,j,l) - enddo - enddo - if(debugprint)print*,'sample l,t,q,u,v,w,= ',isa,jsa,l & - ,t(isa,jsa,l),q(isa,jsa,l),uh(isa,jsa,l),vh(isa,jsa,l) & - ,wh(isa,jsa,l) - if(debugprint)print*,'sample l cwm for FV3',l, & - cwm(isa,jsa,l) - end do - -! surface pressure - VarName='pressfc' - call read_netcdf_2d_para(ncid3d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pint(ista_2l,jsta_2l,lp1)) - do j=jsta,jend - do i=ista,iend -! if(pint(i,j,lp1)>1.0E6 .or. pint(1,jsta_2l,lp1)<50000.) & -! print*,'bad psfc ',i,j,pint(i,j,lp1) - end do - end do - if(debugprint)print*,'sample ',VarName,' =',pint(isa,jsa,lp1) - - pt = ak5(1) - - do j=jsta,jend - do i=ista,iend - pint(i,j,1)= pt - end do - end do - - do l=2,lp1 - do j=jsta,jend - do i=ista,iend - pint(i,j,l) = pint(i,j,l-1) + dpres(i,j,l-1) - enddo - enddo -! if (me == 0) print*,'sample model pint,pmid' ,ii,jj,l & -! ,pint(ii,jj,l),pmid(ii,jj,l) - end do - -!compute pmid from averaged two layer pint - do l=lm,1,-1 - do j=jsta,jend - do i=ista,iend - pmid(i,j,l) = 0.5*(pint(i,j,l)+pint(i,j,l+1)) - enddo - enddo - enddo - -! surface height from FV3 -! dong set missing value for zint -! zint=spval - VarName='hgtsfc' - call read_netcdf_2d_para(ncid3d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,zint(ista_2l,jsta_2l,lp1)) - if(debugprint)print*,'sample ',VarName,' =',zint(isa,jsa,lp1) - do j=jsta,jend - do i=ista,iend - if (zint(i,j,lp1) /= spval) then - fis(i,j) = zint(i,j,lp1) * grav - else - fis(i,j) = spval - endif - enddo - enddo - - do l=lm,1,-1 - do j=jsta,jend - do i=ista,iend - if(zint(i,j,l+1)/=spval .and. buf3d(i,j,l)/=spval)then -!make sure delz is positive - zint(i,j,l)=zint(i,j,l+1)+abs(buf3d(i,j,l)) -! if(zint(i,j,l)>1.0E6)print*,'bad H ',i,j,l,zint(i,j,l) - else - zint(i,j,l)=spval - end if - end do - end do - print*,'sample zint= ',isa,jsa,l,zint(isa,jsa,l) - end do - - do l=lp1,1,-1 - do j=jsta,jend - do i=ista,iend - alpint(i,j,l)=log(pint(i,j,l)) - end do - end do - end do - - do l=lm,1,-1 - do j=jsta,jend - do i=ista,iend - if(zint(i,j,l+1)/=spval .and. zint(i,j,l)/=spval & - .and. pmid(i,j,l)/=spval)then - zmid(i,j,l)=zint(i,j,l+1)+(zint(i,j,l)-zint(i,j,l+1))* & - (log(pmid(i,j,l))-alpint(i,j,l+1))/ & - (alpint(i,j,l)-alpint(i,j,l+1)) - if(zmid(i,j,l)>1.0E6)print*,'bad Hmid ',i,j,l,zmid(i,j,l) - else - zmid(i,j,l)=spval - endif - end do - end do - end do - - - pt = ak5(1) - -! - - deallocate (vcoord4) -!+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -! - -! done with 3d file, close it for now - Status=nf90_close(ncid3d) - deallocate(recname) - -! open flux file - Status = nf90_open(trim(fileNameFlux),ior(nf90_nowrite, nf90_mpiio), & - ncid2d,comm=mpi_comm_world,info=mpi_info_null) - if ( Status /= 0 ) then - print*,'error opening ',fileNameFlux, ' Status = ', Status - print*,'skip reading of flux file' - endif - -! IVEGSRC=1 for IGBP, 0 for USGS, 2 for UMD - VarName='IVEGSRC' - Status=nf90_get_att(ncid2d,nf90_global,'IVEGSRC',IVEGSRC) - if (Status /= 0) then - print*,VarName,' not found-Assigned 1 for IGBP as default' - IVEGSRC=1 - end if - if (me == 0) print*,'IVEGSRC= ',IVEGSRC - -! 
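! --- Illustrative sketch (not part of the UPP source). With made-up numbers it
! --- mirrors the vertical reconstruction above: interface pressures accumulate
! --- downward from the model top (the ak5(1) value) using the layer thickness
! --- dpres, mid-layer pressure is the mean of the bounding interfaces, and
! --- mid-layer height is interpolated between interface heights linearly in
! --- log pressure, exactly as in the pint/pmid/zmid loops above.
      program vertical_grid_sketch
        implicit none
        integer, parameter :: lm = 3, lp1 = lm+1
        real :: dpres(lm) = (/ 20000., 30000., 50000. /)       ! hypothetical Pa
        real :: zint(lp1) = (/ 16000., 12000., 5000., 100. /)  ! hypothetical m
        real :: pint(lp1), pmid(lm), alpint(lp1), zmid(lm), fact
        integer :: l
        pint(1) = 100.0                      ! model-top pressure, ak5(1) analogue
        do l = 2, lp1
          pint(l) = pint(l-1) + dpres(l-1)   ! accumulate downward
        end do
        alpint = log(pint)
        do l = 1, lm
          pmid(l) = 0.5*(pint(l) + pint(l+1))
          fact    = (log(pmid(l)) - alpint(l+1)) / (alpint(l) - alpint(l+1))
          zmid(l) = zint(l+1) + (zint(l) - zint(l+1))*fact
        end do
        print *, 'pmid = ', pmid
        print *, 'zmid = ', zmid
      end program vertical_grid_sketch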
set novegtype based on vegetation classification - if(ivegsrc==2)then - novegtype=13 - else if(ivegsrc==1)then - novegtype=20 - else if(ivegsrc==0)then - novegtype=24 - end if - if (me == 0) print*,'novegtype= ',novegtype - - Status=nf90_get_att(ncid2d,nf90_global,'imp_physics',imp_physics) - if (Status /= 0) then - print*,VarName,' not found-Assigned 11 GFDL as default' - imp_physics=11 - end if - if (me == 0) print*,'MP_PHYSICS= ',imp_physics -! - Status=nf90_get_att(ncid2d,nf90_global,'fhzero',fhzero) - if (Status /= 0) then - print*,VarName,' not found-Assigned 3 hours as default' - fhzero=3 - end if - if (me == 0) print*,'fhzero= ',fhzero -! - Status=nf90_get_att(ncid2d,nf90_global,'dtp',dtp) - if (Status /= 0) then - print*,VarName,' not found-Assigned 90s as default' - dtp=90 - end if - if (me == 0) print*,'dtp= ',dtp -! Initializes constants for Ferrier microphysics - if(imp_physics==5 .or. imp_physics==85 .or. imp_physics==95) then - CALL MICROINIT(imp_physics) - end if - - tprec = float(fhzero) - if(ifhr>240)tprec=12. - tclod = tprec - trdlw = tprec - trdsw = tprec - tsrfc = tprec - tmaxmin = tprec - td3d = tprec - print*,'tprec = ',tprec - - -!Set REF_10CM as missning since gfs doesn't ouput it - do l=1,lm - do j=jsta,jend - do i=ista,iend - REF_10CM(i,j,l)=spval - enddo - enddo - enddo - - VarName='land' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sm) - if(debugprint)print*,'sample ',VarName,' =',sm((ista+iend)/2,(jsta+jend)/2) - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= spval) sm(i,j) = 1.0 - sm(i,j) - enddo - enddo - -! sea ice mask - - VarName = 'icec' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sice) - if(debugprint)print*,'sample ',VarName,' = ',sice(isa,jsa) - -! where(sice /=spval .and. sice >=1.0)sm=0.0 !sea ice has sea -! mask=0 -! GFS flux files have land points with non-zero sea ice, per Iredell, -! these -! points have sea ice changed to zero, i.e., trust land mask more than -! sea ice -! where(sm/=spval .and. sm==0.0)sice=0.0 !specify sea ice=0 at land - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= spval .and. sm(i,j) == 0.0) sice(i,j) = 0.0 - enddo - enddo - - -! PBL height using nemsio - VarName = 'hpbl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pblh) - if(debugprint)print*,'sample ',VarName,' = ',pblh(isa,jsa) - -! frictional velocity using nemsio - VarName='fricv' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ustar) -! if(debugprint)print*,'sample ',VarName,' = ',ustar(isa,jsa) - -! roughness length using getgb - VarName='sfcr' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,z0) -! if(debugprint)print*,'sample ',VarName,' = ',z0(isa,jsa) - -! sfc exchange coeff - VarName='sfexc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,SFCEXC) - -! aerodynamic conductance - VarName='acond' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,acond) - if(debugprint)print*,'sample ',VarName,' = ',acond(isa,jsa) - -! 
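! --- Illustrative sketch (not part of the UPP source). The GFS 'land' field is
! --- 1 over land, while the post expects a sea mask sm that is 1 over water, so
! --- the field is flipped; sea-ice cover reported over land points is then
! --- discarded, trusting the land mask over the ice analysis, as in the loops
! --- above. The mask values and spval flag here are made up.
      program mask_convention_sketch
        implicit none
        real, parameter :: spval = 9.9e10
        real :: sm(4), sice(4)
        integer :: i
        sm   = (/ 1., 0., 1., spval /)       ! GFS land flag: 1=land, 0=water
        sice = (/ 0.3, 0.8, 0.0, 0.5 /)      ! hypothetical ice fractions
        do i = 1, 4
          if (sm(i) /= spval) sm(i) = 1.0 - sm(i)               ! now 1=water
          if (sm(i) /= spval .and. sm(i) == 0.0) sice(i) = 0.0  ! no ice on land
        end do
        print *, 'sm   = ', sm
        print *, 'sice = ', sice
      end program mask_convention_sketch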
mid day avg albedo - VarName='albdo_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgalbedo) - if(debugprint)print*,'sample ',VarName,' = ',avgalbedo(isa,jsa) - do j=jsta,jend - do i=ista,iend - if (avgalbedo(i,j) /= spval) avgalbedo(i,j) = avgalbedo(i,j) * 0.01 - enddo - enddo - -! surface potential T using getgb - VarName='tmpsfc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ths) - -! where(ths/=spval)ths=ths*(p1000/pint(:,:,lp1))**CAPA ! convert to THS - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (ths(i,j) /= spval) then -! write(0,*)' i=',i,' j=',j,' ths=',ths(i,j),' pint=',pint(i,j,lp1) - ths(i,j) = ths(i,j) * (p1000/pint(i,j,lp1))**capa - endif - QS(i,j) = SPVAL ! GFS does not have surface specific humidity - twbs(i,j) = SPVAL ! GFS does not have inst sensible heat flux - qwbs(i,j) = SPVAL ! GFS does not have inst latent heat flux -!assign sst - if (sm(i,j) /= 0.0) then - sst(i,j) = ths(i,j) * (pint(i,j,lp1)/p1000)**capa - else - sst(i,j) = spval - endif - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',ths(isa,jsa) - -! foundation temperature - VarName='tref' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,fdnsst) - if(debugprint)print*,'sample ',VarName,' = ',fdnsst(isa,jsa) - -! GFS does not have time step and physics time step, make up ones since they -! are not really used anyway -! NPHS=1. -! DT=90. -! DTQ2 = DT * NPHS !MEB need to get physics DT - DTQ2 = DTP !MEB need to get physics DT - NPHS=1 - DT = DTQ2/NPHS !MEB need to get DT - TSPH = 3600./DT - -! convective precip in m per physics time step using getgb -! read 3 hour bucket - VarName='cpratb_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgcprate) -! where(avgcprate /= spval)avgcprate=avgcprate*dtq2/1000. ! convert to m -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgcprate(i,j) /= spval) avgcprate(i,j) = avgcprate(i,j) * (dtq2*0.001) - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgcprate(isa,jsa) - -! print*,'maxval CPRATE: ', maxval(CPRATE) - -! read continuous bucket - VarName='cprat_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgcprate_cont) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgcprate_cont(i,j) /= spval) avgcprate_cont(i,j) = & - avgcprate_cont(i,j) * (dtq2*0.001) - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgcprate_cont(isa,jsa) - -! print*,'maxval CPRATE: ', maxval(CPRATE) - -! precip rate in m per physics time step using getgb - VarName='prateb_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgprec) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if(avgprec(i,j) /= spval)avgprec(i,j)=avgprec(i,j)*(dtq2*0.001) - enddo - enddo - - if(debugprint)print*,'sample ',VarName,' = ',avgprec(isa,jsa) - -! prec = avgprec !set avg cprate to inst one to derive other fields - - VarName='prate_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgprec_cont) -! where(avgprec /= spval)avgprec=avgprec*dtq2/1000. ! 
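! --- Illustrative sketch (not part of the UPP source). tmpsfc is turned into a
! --- surface potential temperature with the Poisson relation
! --- theta = T*(p1000/psfc)**capa, and over water the skin temperature (sst) is
! --- recovered by the inverse, as in the ths/sst loop above. capa = Rd/cp ~ 0.2854
! --- and p1000 = 100000 Pa are assumed here; the real constants come from
! --- params_mod. The input values are made up.
      program theta_sketch
        implicit none
        real, parameter :: capa = 0.2854, p1000 = 100000.0
        real :: tsfc, psfc, ths, sst
        tsfc = 288.0                         ! hypothetical skin temperature (K)
        psfc = 96500.0                       ! hypothetical surface pressure (Pa)
        ths  = tsfc * (p1000/psfc)**capa     ! potential temperature
        sst  = ths  * (psfc/p1000)**capa     ! back to temperature over water
        print *, 'ths, sst = ', ths, sst     ! sst recovers 288.0
      end program theta_sketch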
convert to m -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgprec_cont(i,j) /=spval)avgprec_cont(i,j)=avgprec_cont(i,j) & - * (dtq2*0.001) - enddo - enddo - - if(debugprint)print*,'sample ',VarName,' = ',avgprec_cont(isa,jsa) -! precip rate in m per physics time step - VarName='tprcp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,prec) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (prec(i,j) /= spval) prec(i,j)=prec(i,j)* (dtq2*0.001) & - * 1000. / dtp - enddo - enddo - -! convective precip rate in m per physics time step - VarName='cnvprcp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,cprate) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (cprate(i,j) /= spval) then - cprate(i,j) = max(0.,cprate(i,j)) * (dtq2*0.001) & - * 1000. / dtp - else - cprate(i,j) = 0. - endif - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',cprate(isa,jsa) - -! GFS does not have accumulated total, gridscale, and convective precip, will use inst precip to derive in SURFCE.f - -! max hourly 1-km agl reflectivity -! VarName='refdmax' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,refd_max) -! if(debugprint)print*,'sample ',VarName,' = ',refd_max(isa,jsa) -! max hourly -10C reflectivity -! VarName='refdmax263k' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,refdm10c_max) -! if(debugprint)print*,'sample ',VarName,' = ',refdm10c_max(isa,jsa) - -! max hourly u comp of 10m agl wind -! VarName='u10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,u10max) -! if(debugprint)print*,'sample ',VarName,' = ',u10max(isa,jsa) -! max hourly v comp of 10m agl wind -! VarName='v10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,v10max) -! if(debugprint)print*,'sample ',VarName,' = ',v10max(isa,jsa) -! max hourly 10m agl wind speed -! VarName='spd10max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,wspd10max) -! if(debugprint)print*,'sample ',VarName,' = ',wspd10max(isa,jsa) - - -! 2m T using nemsio - VarName='tmp2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,tshltr) - if(debugprint)print*,'sample ',VarName,' = ',tshltr(isa,jsa) - -! inst snow water eqivalent using nemsio - VarName='weasd' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sno) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j)==0.) sno(i,j) = spval - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',sno(isa,jsa) - -! ave snow cover - VarName='snowc_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,snoavg) -! snow cover is multipled by 100 in SURFCE before writing it out - do j=jsta,jend - do i=ista,iend - if (sm(i,j)==1.0 .and. sice(i,j)==0.) snoavg(i,j)=spval - if(snoavg(i,j)/=spval)snoavg(i,j)=snoavg(i,j)/100. - end do - end do - -! 
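! --- Illustrative sketch (not part of the UPP source). The *_ave precipitation
! --- fields are bucket-averaged rates in kg m-2 s-1 (equivalent to mm s-1 of
! --- liquid water); multiplying by the physics time step dtq2 and by 0.001
! --- converts them to metres per physics step, which is the unit the rest of
! --- the post expects, as in the avgprec/avgcprate loops above. Numbers are
! --- made up.
      program precip_units_sketch
        implicit none
        real :: rate_kg_m2_s, dtq2, prec_m
        rate_kg_m2_s = 2.5e-4          ! hypothetical averaged rate (~0.9 mm/hr)
        dtq2         = 150.0           ! hypothetical physics time step (s)
        prec_m       = rate_kg_m2_s * (dtq2*0.001)
        print *, 'precip per physics step (m) = ', prec_m     ! 3.75e-5 m
      end program precip_units_sketch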
snow depth in mm using nemsio - VarName='snod' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,si) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j)==1.0 .and. sice(i,j)==0.) si(i,j)=spval - if (si(i,j) /= spval) si(i,j) = si(i,j) * 1000.0 - CLDEFI(i,j) = SPVAL ! GFS does not have convective cloud efficiency - lspa(i,j) = spval ! GFS does not have similated precip - TH10(i,j) = SPVAL ! GFS does not have 10 m theta - TH10(i,j) = SPVAL ! GFS does not have 10 m theta - Q10(i,j) = SPVAL ! GFS does not have 10 m humidity - ALBASE(i,j) = SPVAL ! GFS does not have snow free albedo - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',si(isa,jsa) - -! 2m T using nemsio - VarName='tmp2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,tshltr) - if(debugprint)print*,'sample ',VarName,' = ',tshltr(isa,jsa) - -! GFS does not have 2m pres, estimate it, also convert t to theta - Do j=jsta,jend - do i=ista,iend - PSHLTR(I,J)=pint(I,J,lm+1)*EXP(-0.068283/tshltr(i,j)) - tshltr(i,j)= tshltr(i,j)*(p1000/PSHLTR(I,J))**CAPA ! convert to theta -! if (j == jm/2 .and. mod(i,50) == 0) -! + print*,'sample 2m T and P after scatter= ' -! + ,i,j,tshltr(i,j),pshltr(i,j) - end do - end do - -! 2m specific humidity using nemsio - VarName='spfh2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,qshltr) - if(debugprint)print*,'sample ',VarName,' = ',qshltr(isa,jsa) - -! mid day avg albedo in fraction using nemsio -! VarName='albdosfc' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,avgalbedo) -!! where(avgalbedo /= spval)avgalbedo=avgalbedo/100. ! convert to fraction -!!$omp parallel do private(i,j) -! do j=jsta,jend -! do i=ista,iend -! if (avgalbedo(i,j) /= spval) avgalbedo(i,j) = avgalbedo(i,j) * 0.01 -! enddo -! enddo -! if(debugprint)print*,'sample ',VarName,' = ',avgalbedo(isa,jsa) - -! time averaged column cloud fractionusing nemsio - VarName='tcdc_aveclm' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgtcdc) -! where(avgtcdc /= spval)avgtcdc=avgtcdc/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgtcdc(i,j) /= spval) avgtcdc(i,j) = avgtcdc(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgtcdc(isa,jsa) - -! GFS probably does not use zenith angle -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l, iend_2u - Czen(i,j) = spval - CZMEAN(i,j) = SPVAL - enddo - enddo - -! maximum snow albedo in fraction using nemsio - VarName='snoalb' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,mxsnal) -! where(mxsnal /= spval)mxsnal=mxsnal/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (mxsnal(i,j) /= spval) mxsnal(i,j) = mxsnal(i,j) * 0.01 - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',mxsnal(isa,jsa) - -! land fraction - VarName='lfrac' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,landfrac) - -! GFS probably does not use sigt4, set it to sig*t^4 -!$omp parallel do private(i,j,tlmh) - Do j=jsta,jend - do i=ista,iend - TLMH = T(I,J,LM) * T(I,J,LM) - Sigt4(i,j) = 5.67E-8 * TLMH * TLMH - End do - End do - -! 
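! --- Illustrative sketch (not part of the UPP source). GFS carries no 2 m
! --- pressure, so it is estimated hydrostatically from the surface pressure,
! --- p2m = psfc*exp(-g*dz/(Rd*T)) with dz = 2 m; the constant 0.068283 used in
! --- the PSHLTR line above is approximately 2*g/Rd. The 2 m temperature is then
! --- converted to a potential temperature against that estimated pressure.
! --- The constants capa and p1000 and the inputs below are assumptions.
      program shelter_pressure_sketch
        implicit none
        real, parameter :: g = 9.80665, rd = 287.05, p1000 = 1.e5, capa = 0.2854
        real :: psfc, t2m, p2m, th2m
        psfc = 98000.0                       ! hypothetical surface pressure (Pa)
        t2m  = 285.0                         ! hypothetical 2 m temperature (K)
        p2m  = psfc * exp(-2.0*g/(rd*t2m))   ! ~ psfc*exp(-0.068283/t2m)
        th2m = t2m * (p1000/p2m)**capa       ! convert 2 m T to theta
        print *, 'p2m, th2m = ', p2m, th2m
      end program shelter_pressure_sketch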
TG is not used, skip it for now - -! GFS does not have inst cloud fraction for high, middle, and low cloud -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - cfrach(i,j) = spval - cfracl(i,j) = spval - cfracm(i,j) = spval - enddo - enddo - -! ave high cloud fraction using nemsio - VarName='tcdc_avehcl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgcfrach) -! where(avgcfrach /= spval)avgcfrach=avgcfrach/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgcfrach(i,j) /= spval) avgcfrach(i,j) = avgcfrach(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfrach(isa,jsa) - -! ave low cloud fraction using nemsio - VarName='tcdc_avelcl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgcfracl) -! where(avgcfracl /= spval)avgcfracl=avgcfracl/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgcfracl(i,j) /= spval) avgcfracl(i,j) = avgcfracl(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfracl(isa,jsa) - -! ave middle cloud fraction using nemsio - VarName='tcdc_avemcl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgcfracm) -! where(avgcfracm /= spval)avgcfracm=avgcfracm/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (avgcfracm(i,j) /= spval) avgcfracm(i,j) = avgcfracm(i,j) * 0.01 - enddo - enddo - if(debugprint)print*,'sample ',VarName,' = ',avgcfracm(isa,jsa) - -! inst convective cloud fraction using nemsio - VarName='tcdccnvcl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,cnvcfr) -! where(cnvcfr /= spval)cnvcfr=cnvcfr/100. ! convert to fraction -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (cnvcfr(i,j) /= spval) cnvcfr (i,j)= cnvcfr(i,j) * 0.01 - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',cnvcfr(isa,jsa) - -! slope type using nemsio - VarName='sltyp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,buf) -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=ista_2l,iend_2u - if (buf(i,j) < spval) then - islope(i,j) = nint(buf(i,j)) - else - islope(i,j) = 0 - endif - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',islope(isa,jsa) - -! plant canopy sfc wtr in m - VarName='cnwat' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,cmc) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (cmc(i,j) /= spval) cmc(i,j) = cmc(i,j) * 0.001 - if (sm(i,j) /= 0.0) cmc(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',cmc(isa,jsa) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - grnflx(i,j) = spval ! GFS does not have inst ground heat flux - enddo - enddo - -! frozen precip fraction using nemsio - VarName='cpofp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sr) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if(sr(i,j) /= spval) then -!set range within (0,1) - sr(i,j)=min(1.,max(0.,sr(i,j))) - endif - enddo - enddo - -! 
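! --- Illustrative sketch (not part of the UPP source). Categorical fields such
! --- as sltyp arrive as reals; they are converted with nint only where the
! --- value is not the missing flag (nint of a ~1e11 flag would not fit in a
! --- default integer), otherwise a safe default is used. The frozen-precip
! --- fraction is likewise clamped into [0,1], as in the islope and sr loops
! --- above. The spval value and inputs below are made up.
      program guard_and_clamp_sketch
        implicit none
        real, parameter :: spval = 9.9e10
        real :: buf, sr
        integer :: islope
        buf = 3.0                            ! hypothetical slope category
        if (buf < spval) then
          islope = nint(buf)
        else
          islope = 0                         ! fall-back category
        end if
        sr = 1.2                             ! hypothetical frozen-precip fraction
        if (sr /= spval) sr = min(1., max(0., sr))
        print *, 'islope, sr = ', islope, sr
      end program guard_and_clamp_sketch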
sea ice skin temperature - VarName='tisfc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ti) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sice(i,j) == spval .or. sice(i,j) == 0.) ti(i,j)=spval - enddo - enddo - -! vegetation fraction in fraction. using nemsio - VarName='veg' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,vegfrc) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (vegfrc(i,j) /= spval) then - vegfrc(i,j) = vegfrc(i,j) * 0.01 - else - vegfrc(i,j) = 0.0 - endif - enddo - enddo -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) vegfrc(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample ',VarName,' = ',vegfrc(isa,jsa) - -! GFS doesn not yet output soil layer thickness, assign SLDPTH to be the same as nam - - SLDPTH(1) = 0.10 - SLDPTH(2) = 0.3 - SLDPTH(3) = 0.6 - SLDPTH(4) = 1.0 - -! liquid volumetric soil mpisture in fraction using nemsio - VarName='soill1' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sh2o(ista_2l,jsta_2l,1)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) sh2o(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,1) - - VarName='soill2' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sh2o(ista_2l,jsta_2l,2)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) sh2o(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,2) - - VarName='soill3' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sh2o(ista_2l,jsta_2l,3)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) sh2o(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,3) - - VarName='soill4' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sh2o(ista_2l,jsta_2l,4)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) sh2o(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,sh2o(isa,jsa,4) - -! volumetric soil moisture using nemsio - VarName='soilw1' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smc(ista_2l,jsta_2l,1)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smc(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,1) - - VarName='soilw2' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smc(ista_2l,jsta_2l,2)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smc(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,2) - - VarName='soilw3' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smc(ista_2l,jsta_2l,3)) -! 
mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smc(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,3) - - VarName='soilw4' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smc(ista_2l,jsta_2l,4)) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smc(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample l',VarName,' = ',1,smc(isa,jsa,4) - -! soil temperature using nemsio - VarName='soilt1' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,stc(ista_2l,jsta_2l,1)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,1) = spval - !if (sm(i,j) /= 0.0) stc(i,j,1) = spval - enddo - enddo - if(debugprint)print*,'sample l','stc',' = ',1,stc(isa,jsa,1) - - VarName='soilt2' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,stc(ista_2l,jsta_2l,2)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,2) = spval - !if (sm(i,j) /= 0.0) stc(i,j,2) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,2) - - VarName='soilt3' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,stc(ista_2l,jsta_2l,3)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,3) = spval - !if (sm(i,j) /= 0.0) stc(i,j,3) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,3) - - VarName='soilt4' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,stc(ista_2l,jsta_2l,4)) -! mask open water areas, combine with sea ice tmp -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) stc(i,j,4) = spval - !if (sm(i,j) /= 0.0) stc(i,j,4) = spval - enddo - enddo - if(debugprint)print*,'sample stc = ',1,stc(isa,jsa,4) - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - acfrcv(i,j) = spval ! GFS does not output time averaged convective and strat cloud fraction, set acfrcv to spval, ncfrcv to 1 - ncfrcv(i,j) = 1.0 - acfrst(i,j) = spval ! GFS does not output time averaged cloud fraction, set acfrst to spval, ncfrst to 1 - ncfrst(i,j) = 1.0 - bgroff(i,j) = spval ! GFS does not have UNDERGROUND RUNOFF - rlwtoa(i,j) = spval ! GFS does not have inst model top outgoing longwave - enddo - enddo -! trdlw(i,j) = 6.0 - ardlw = 1.0 ! GFS incoming sfc longwave has been averaged over 6 hr bucket, set ARDLW to 1 - -! time averaged incoming sfc longwave - VarName='dlwrf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwin) - -! inst incoming sfc longwave - VarName='dlwrf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,rlwin) - -! time averaged outgoing sfc longwave - VarName='ulwrf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwout) -! 
inst outgoing sfc longwave - VarName='ulwrf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,radot) - -! where(alwout /= spval) alwout=-alwout ! CLDRAD puts a minus sign before gribbing -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (alwout(i,j) /= spval) alwout(i,j) = -alwout(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,alwout(isa,jsa) - -! time averaged outgoing model top longwave using gfsio - VarName='ulwrf_avetoa' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwtoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,alwtoa(isa,jsa) - -! GFS incoming sfc longwave has been averaged, set ARDLW to 1 - ardsw=1.0 -! trdsw=6.0 - -! time averaged incoming sfc shortwave - VarName='dswrf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswin) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswin(isa,jsa) - -! inst incoming sfc shortwave - VarName='dswrf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,rswin) - -! inst incoming clear sky sfc shortwave - VarName='csdlf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,rswinc) - -! time averaged incoming sfc uv-b using getgb - VarName='duvb_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,auvbin) -! if(debugprint)print*,'sample l',VarName,' = ',1,auvbin(isa,jsa) - -! time averaged incoming sfc clear sky uv-b using getgb - VarName='cduvb_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,auvbinc) -! if(debugprint)print*,'sample l',VarName,' = ',1,auvbinc(isa,jsa) - -! time averaged outgoing sfc shortwave using gfsio - VarName='uswrf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswout) -! where(aswout /= spval) aswout=-aswout ! CLDRAD puts a minus sign before gribbing -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (aswout(i,j) /= spval) aswout(i,j) = -aswout(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,aswout(isa,jsa) - -! inst outgoing sfc shortwave using gfsio - VarName='uswrf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,rswout) - -! time averaged model top incoming shortwave - VarName='dswrf_avetoa' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswintoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswintoa(isa,jsa) - -! time averaged model top outgoing shortwave - VarName='uswrf_avetoa' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswtoa) -! if(debugprint)print*,'sample l',VarName,' = ',1,aswtoa(isa,jsa) - -! time averaged surface sensible heat flux, multiplied by -1 because wrf model flux -! has reversed sign convention using gfsio - VarName='shtfl_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sfcshx) -! where (sfcshx /= spval)sfcshx=-sfcshx -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sfcshx(i,j) /= spval) sfcshx(i,j) = -sfcshx(i,j) - enddo - enddo -! 
if(debugprint)print*,'sample l',VarName,' = ',1,sfcshx(isa,jsa) - -! inst surface sensible heat flux - VarName='shtfl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,twbs) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (twbs(i,j) /= spval) twbs(i,j) = -twbs(i,j) - enddo - enddo - -! GFS surface flux has been averaged, set ASRFC to 1 - asrfc=1.0 -! tsrfc=6.0 - -! time averaged surface latent heat flux, multiplied by -1 because wrf model flux -! has reversed sign vonvention using gfsio - VarName='lhtfl_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sfclhx) -! where (sfclhx /= spval)sfclhx=-sfclhx -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sfclhx(i,j) /= spval) sfclhx(i,j) = -sfclhx(i,j) - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,sfclhx(isa,jsa) - -! inst surface latent heat flux - VarName='lhtfl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,qwbs) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (qwbs(i,j) /= spval) qwbs(i,j) = -qwbs(i,j) - enddo - enddo - - if(me==0)print*,'rdaod= ',rdaod -! inst aod550 optical depth - if(rdaod) then - VarName='aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aod550) - - VarName='du_aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,du_aod550) - - VarName='ss_aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ss_aod550) - - VarName='su_aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,su_aod550) - - VarName='oc_aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,oc_aod550) - - VarName='bc_aod550' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,bc_aod550) - endif !end if rdaod - - -! time averaged ground heat flux using nemsio - VarName='gflux_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,subshx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) subshx(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,subshx(isa,jsa) - -! inst ground heat flux using nemsio - VarName='gflux' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,grnflx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) grnflx(i,j) = spval - enddo - enddo - -! time averaged zonal momentum flux using gfsio - VarName='uflx_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sfcux) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcux(isa,jsa) - -! time averaged meridional momentum flux using nemsio - VarName='vflx_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,sfcvx) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcvx(isa,jsa) - -! dong read in inst surface flux -! inst zonal momentum flux using gfsio -! VarName='uflx' -! 
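! --- Illustrative sketch (not part of the UPP source). The GFS surface heat
! --- fluxes are written with the opposite sign convention from the WRF-style
! --- one the post uses, so every defined value is simply negated, exactly as
! --- done for shtfl/lhtfl above; missing points keep the spval flag. The flag
! --- value and fluxes below are made up.
      program flux_sign_sketch
        implicit none
        real, parameter :: spval = 9.9e10
        real :: sfcshx(4)
        integer :: i
        sfcshx = (/ 15.0, -40.0, 0.0, spval /)    ! hypothetical fluxes (W m-2)
        do i = 1, 4
          if (sfcshx(i) /= spval) sfcshx(i) = -sfcshx(i)
        end do
        print *, 'post-convention fluxes = ', sfcshx
      end program flux_sign_sketch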
call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcuxi) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcuxi(isa,jsa) - -! inst meridional momentum flux using nemsio -! VarName='vflx' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,sfcvxi) -! if(debugprint)print*,'sample l',VarName,' = ',1,sfcvxi(isa,jsa) - - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - sfcuvx(i,j) = spval ! GFS does not use total momentum flux - enddo - enddo - -! time averaged zonal gravity wave stress using nemsio - VarName='u-gwd_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,gtaux) -! if(debugprint)print*,'sample l',VarName,' = ',1,gtaux(isa,jsa) - -! time averaged meridional gravity wave stress using getgb - VarName='v-gwd_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,gtauy) -! if(debugprint)print*,'sample l',VarName,' = ',1,gtauy(isa,jsa) - -! time averaged accumulated potential evaporation - VarName='pevpr_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgpotevp) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) avgpotevp(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,potevp(isa,jsa) - -! inst potential evaporation - VarName='pevpr' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,potevp) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) == 1.0 .and. sice(i,j) ==0.) potevp(i,j) = spval - enddo - enddo - - do l=1,lm -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u -! GFS does not have temperature tendency due to long wave radiation - rlwtt(i,j,l) = spval -! GFS does not have temperature tendency due to short wave radiation - rswtt(i,j,l) = spval -! GFS does not have temperature tendency due to latent heating from convection - tcucn(i,j,l) = spval - tcucns(i,j,l) = spval -! GFS does not have temperature tendency due to latent heating from grid scale - train(i,j,l) = spval - enddo - enddo - enddo - -! set avrain to 1 - avrain=1.0 - avcnvc=1.0 - theat=6.0 ! just in case GFS decides to output T tendency - -! 10 m u using nemsio - VarName='ugrd10m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,u10) - - do j=jsta,jend - do i=ista,iend - u10h(i,j)=u10(i,j) - end do - end do -! if(debugprint)print*,'sample l',VarName,' = ',1,u10(isa,jsa) - -! 10 m v using gfsio - VarName='vgrd10m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,v10) - - do j=jsta,jend - do i=ista,iend - v10h(i,j)=v10(i,j) - end do - end do -! if(debugprint)print*,'sample l',VarName,' = ',1,v10(isa,jsa) - -! vegetation type, it's in GFS surface file, hopefully will merge into gfsio soon - VarName='vtype' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,buf) -! where (buf /= spval) -! ivgtyp=nint(buf) -! elsewhere -! ivgtyp=0 !need to feed reasonable value to crtm -! 
end where -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=ista_2l,iend_2u - if (buf(i,j) < spval) then - ivgtyp(i,j) = nint(buf(i,j)) - else - ivgtyp(i,j) = 0 - endif - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,ivgtyp(isa,jsa) - -! soil type, it's in GFS surface file, hopefully will merge into gfsio soon - VarName='sotyp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,buf) -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=ista_2l,iend_2u - if (buf(i,j) < spval) then - isltyp(i,j) = nint(buf(i,j)) - else - isltyp(i,j) = 0 !need to feed reasonable value to crtm - endif - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,isltyp(isa,jsa) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - smstav(i,j) = spval ! GFS does not have soil moisture availability -! smstot(i,j) = spval ! GFS does not have total soil moisture - sfcevp(i,j) = spval ! GFS does not have accumulated surface evaporation - acsnow(i,j) = spval ! GFS does not have averaged accumulated snow - acsnom(i,j) = spval ! GFS does not have snow melt -! sst(i,j) = spval ! GFS does not have sst???? - thz0(i,j) = ths(i,j) ! GFS does not have THZ0, use THS to substitute - qz0(i,j) = spval ! GFS does not output humidity at roughness length - uz0(i,j) = spval ! GFS does not output u at roughness length - vz0(i,j) = spval ! GFS does not output humidity at roughness length - enddo - enddo - do l=1,lm -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - EL_PBL(i,j,l) = spval ! GFS does not have mixing length - exch_h(i,j,l) = spval ! GFS does not output exchange coefficient - enddo - enddo - enddo -! if(debugprint)print*,'sample l',VarName,' = ',1,thz0(isa,jsa) - -! retrieve inst convective cloud top, GFS has cloud top pressure instead of index, -! will need to modify CLDRAD.f to use pressure directly instead of index -! VarName='pres' -! VcoordName='convect-cld top' -! l=1 -! if(debugprint)print*,'sample l',VarName,' = ',1,ptop(isa,jsa) - VarName='prescnvclt' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ptop) - - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - htop(i,j) = spval - if(ptop(i,j) <= 0.0) ptop(i,j) = spval - enddo - enddo - do j=jsta,jend - do i=ista,iend - if(ptop(i,j) < spval)then - do l=1,lm - if(ptop(i,j) <= pmid(i,j,l))then - htop(i,j) = l -! if(i==ii .and. j==jj)print*,'sample ptop,pmid pmid-1,pint= ', & -! ptop(i,j),pmid(i,j,l),pmid(i,j,l-1),pint(i,j,l),htop(i,j) - exit - end if - end do - end if - end do - end do - -! retrieve inst convective cloud bottom, GFS has cloud top pressure instead of index, -! will need to modify CLDRAD.f to use pressure directly instead of index - VarName='prescnvclb' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pbot) -! if(debugprint)print*,'sample l',VarName,VcoordName,' = ',1,pbot(isa,jsa) -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - hbot(i,j) = spval - if(pbot(i,j) <= 0.0) pbot(i,j) = spval - enddo - enddo - do j=jsta,jend - do i=ista,iend -! if(.not.lb(i,j))print*,'false bitmask for pbot at ' -! + ,i,j,pbot(i,j) - if(pbot(i,j) < spval)then - do l=lm,1,-1 - if(pbot(i,j) >= pmid(i,j,l)) then - hbot(i,j) = l -! if(i==ii .and. j==jj)print*,'sample pbot,pmid= ', & -! 
pbot(i,j),pmid(i,j,l),hbot(i,j) - exit - end if - end do - end if - end do - end do - if(debugprint)print*,'sample hbot = ',hbot(isa,jsa) -! retrieve time averaged low cloud top pressure using nemsio - VarName='pres_avelct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ptopl) -! if(debugprint)print*,'sample l',VarName,' = ',1,ptopl(isa,jsa) - -! retrieve time averaged low cloud bottom pressure using nemsio - VarName='pres_avelcb' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pbotl) -! if(debugprint)print*,'sample l',VarName,' = ',1,pbotl(isa,jsa) - -! retrieve time averaged low cloud top temperature using nemsio - VarName='tmp_avelct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,Ttopl) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttopl(isa,jsa) - -! retrieve time averaged middle cloud top pressure using nemsio - VarName='pres_avemct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ptopm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,ptopm(isa,jsa) - -! retrieve time averaged middle cloud bottom pressure using nemsio - VarName='pres_avemcb' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pbotm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,pbotm(isa,jsa) - -! retrieve time averaged middle cloud top temperature using nemsio - VarName='tmp_avemct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,Ttopm) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttopm(isa,jsa) - -! retrieve time averaged high cloud top pressure using nemsio ********* - VarName='pres_avehct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,ptoph) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,ptoph(isa,jsa) - -! retrieve time averaged high cloud bottom pressure using nemsio - VarName='pres_avehcb' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pboth) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,pboth(isa,jsa) - -! retrieve time averaged high cloud top temperature using nemsio - VarName='tmp_avehct' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,Ttoph) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ',1,Ttoph(isa,jsa) - -! retrieve boundary layer cloud cover using nemsio - VarName='tcdc_avebndcl' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pblcfr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,pblcfr(isa,jsa) -! where (pblcfr /= spval)pblcfr=pblcfr/100. ! convert to fraction -!$omp parallel do private(i,j) - do j = jsta_2l, jend_2u - do i=ista_2l,iend_2u - if (pblcfr(i,j) < spval) pblcfr(i,j) = pblcfr(i,j) * 0.01 - enddo - enddo - -! retrieve cloud work function - VarName='cwork_aveclm' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,cldwork) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,cldwork(isa,jsa) - -! 
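! --- Illustrative sketch (not part of the UPP source). GFS reports convective
! --- cloud top and bottom as pressures, while downstream routines want model
! --- level indices, so the column of mid-layer pressures is scanned: from the
! --- top down for the first layer at or below the cloud-top pressure (htop),
! --- and from the bottom up for the cloud-bottom pressure (hbot), mirroring the
! --- loops above. The column and cloud pressures below are made up.
      program cloud_level_index_sketch
        implicit none
        integer, parameter :: lm = 5
        real :: pmid(lm) = (/ 20000., 40000., 60000., 80000., 95000. /)
        real :: ptop, pbot
        integer :: l, htop, hbot
        ptop = 45000.0                  ! hypothetical convective cloud top (Pa)
        pbot = 90000.0                  ! hypothetical convective cloud bottom (Pa)
        htop = lm
        do l = 1, lm                    ! top-down search
          if (ptop <= pmid(l)) then
            htop = l
            exit
          end if
        end do
        hbot = 1
        do l = lm, 1, -1                ! bottom-up search
          if (pbot >= pmid(l)) then
            hbot = l
            exit
          end if
        end do
        print *, 'htop, hbot = ', htop, hbot     ! 3, 4
      end program cloud_level_index_sketch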
accumulated total (base+surface) runoff - VarName='watr_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,runoff) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) runoff(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,runoff(isa,jsa) - -! accumulated evaporation of intercepted water - VarName='ecan_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,tecan) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) tecan(i,j) = spval - enddo - enddo - -! accumulated plant transpiration - VarName='etran_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,tetran) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) tetran(i,j) = spval - enddo - enddo - -! accumulated soil surface evaporation - VarName='edir_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,tedir) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) tedir(i,j) = spval - enddo - enddo - -! total water storage in aquifer - VarName='wa_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,twa) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) twa(i,j) = spval - enddo - enddo - -! retrieve shelter max temperature using nemsio - VarName='tmax_max2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,maxtshltr) - -! retrieve shelter min temperature using nemsio - VarName='tmin_min2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,mintshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', & -! 1,mintshltr(im/2,(jsta+jend)/2) - -! retrieve shelter max RH -! VarName='rh02max' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,maxrhshltr) - -! retrieve shelter min temperature using nemsio -! VarName='rh02min' -! call read_netcdf_2d_scatter(me,ncid2d,1,im,jm,jsta,jsta_2l & -! ,jend_2u,MPI_COMM_COMP,icnt,idsp,spval,VarName,minrhshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', & -! 1,mintshltr(im/2,(jsta+jend)/2) - -!$omp parallel do private(i,j) - do j=jsta_2l,jend_2u - do i=ista_2l,iend_2u - MAXRHSHLTR(i,j) = SPVAL - MINRHSHLTR(i,j) = SPVAL - enddo - enddo - -! retrieve ice thickness using nemsio - VarName='icetk' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,dzice) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,dzice(isa,jsa) - -! retrieve wilting point using nemsio - VarName='wilt' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smcwlt) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smcwlt(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,smcwlt(isa,jsa) - -! 
retrieve sunshine duration using nemsio - VarName='sunsd_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,suntime) - -! retrieve field capacity using nemsio - VarName='fldcp' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,fieldcapa) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) fieldcapa(i,j) = spval - enddo - enddo -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', 1,fieldcapa(isa,jsa) - -! retrieve time averaged surface visible beam downward solar flux - VarName='vbdsf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avisbeamswin) - l=1 - -! retrieve time averaged surface visible diffuse downward solar flux - VarName='vddsf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avisdiffswin) - -! retrieve time averaged surface near IR beam downward solar flux - VarName='nbdsf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,airbeamswin) - -! retrieve time averaged surface near IR diffuse downward solar flux - VarName='nddsf_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,airdiffswin) - -! retrieve time averaged surface clear sky outgoing LW - VarName='csulf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwoutc) - -! retrieve time averaged TOA clear sky outgoing LW - VarName='csulftoa' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwtoac) - -! retrieve time averaged surface clear sky outgoing SW - VarName='csusf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswoutc) - -! retrieve time averaged TOA clear sky outgoing LW - VarName='csusftoa' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswtoac) - -! retrieve time averaged surface clear sky incoming LW - VarName='csdlf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,alwinc) - -! retrieve time averaged surface clear sky incoming SW - VarName='csdsf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,aswinc) - -! retrieve shelter max specific humidity using nemsio - VarName='spfhmax_max2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,maxqshltr) -! if(debugprint)print*,'sample l',VcoordName,VarName,' = ', -! 1,maxqshltr(isa,jsa) - -! retrieve shelter min temperature using nemsio - VarName='spfhmin_min2m' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,minqshltr) - -! retrieve storm runoff using nemsio - VarName='ssrun_acc' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,SSROFF) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) ssroff(i,j) = spval - enddo - enddo - -! retrieve direct soil evaporation - VarName='evbs_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgedir) -! 
mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) avgedir(i,j) = spval - enddo - enddo - -! retrieve CANOPY WATER EVAP - VarName='evcw_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgecan) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) avgecan(i,j) = spval - enddo - enddo - -! retrieve AVERAGED PRECIP ADVECTED HEAT FLUX - VarName='pah_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,paha) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) paha(i,j) = spval - enddo - enddo - -! retrieve nstantaneous PRECIP ADVECTED HEAT FLUX - VarName='pahi' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,pahi) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) pahi(i,j) = spval - enddo - enddo - -! retrieve PLANT TRANSPIRATION - VarName='trans_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgetrans) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) avgetrans(i,j) = spval - enddo - enddo - -! retrieve snow sublimation - VarName='sbsno_ave' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,avgesnow) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j)==1.0 .and. sice(i,j)==0.) avgesnow(i,j)=spval - enddo - enddo - -! retrive total soil moisture - VarName='soilm' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,smstot) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) smstot(i,j) = spval - enddo - enddo - -! retrieve snow phase change heat flux - VarName='snohf' - call read_netcdf_2d_para(ncid2d,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,VarName,snopcx) -! mask water areas -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - if (sm(i,j) /= 0.0) snopcx(i,j) = spval - enddo - enddo - -! GFS does not have deep convective cloud top and bottom fields - -!$omp parallel do private(i,j) - do j=jsta,jend - do i=ista,iend - HTOPD(i,j) = SPVAL - HBOTD(i,j) = SPVAL - HTOPS(i,j) = SPVAL - HBOTS(i,j) = SPVAL - CUPPT(i,j) = SPVAL - enddo - enddo - -! done with flux file, close it for now - Status=nf90_close(ncid2d) -! deallocate(tmp,recname,reclevtyp,reclev) - -! 
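
All of the read_netcdf_2d_para calls in this section follow one pattern: each MPI rank reads only its own (ista:iend, jsta:jend) patch, a variable missing from the file is flagged with spval everywhere, and _FillValue points are converted to spval. The 2-D reader itself is not part of this hunk, so the sketch below is an assumption that its body mirrors the read_netcdf_3d_para routine added further down; the name and the fill-value tolerance are illustrative only.

      ! Minimal sketch of a 2-D parallel subdomain read: each rank pulls
      ! its own patch with a start/count hyperslab; absent variables and
      ! _FillValue points fall back to spval.
      subroutine read_netcdf_2d_sketch(ncid, ista, iend, jsta, jend, spval, varname, buf)
        use netcdf
        implicit none
        integer,          intent(in)  :: ncid, ista, iend, jsta, jend
        real,             intent(in)  :: spval
        character(len=*), intent(in)  :: varname
        real,             intent(out) :: buf(ista:iend, jsta:jend)
        integer :: varid, iret
        real    :: fill_value
        real, parameter :: spval_netcdf = 9.99e+20
        iret = nf90_inq_varid(ncid, trim(varname), varid)
        if (iret /= 0) then
          buf = spval                              ! variable absent: flag whole patch missing
          return
        end if
        iret = nf90_get_att(ncid, varid, "_FillValue", fill_value)
        if (iret /= 0) fill_value = spval_netcdf
        iret = nf90_get_var(ncid, varid, buf, start=(/ista, jsta/), &
                            count=(/iend-ista+1, jend-jsta+1/))
        ! illustrative tolerance; the production reader compares against a small constant
        where (abs(buf - fill_value) < 1.0e-6 * abs(fill_value)) buf = spval
      end subroutine read_netcdf_2d_sketch
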
pos east - call collect_loc(gdlat,dummy) - if(me == 1)then - write(6,*) 'laststart,latlast,me B calling bcast=',latstart,latlast,me - endif - if(me == 0)then - latstart = nint(dummy(1,1)*gdsdegr) - latlast = nint(dummy(im,jm)*gdsdegr) - write(6,*) 'laststart,latlast B bcast= ',latstart,latlast,'gdsdegr=',gdsdegr,& - 'dummy(1,1)=',dummy(1,1),dummy(im,jm),'gdlat=',gdlat(1,1) - end if - call mpi_bcast(latstart,1,MPI_INTEGER,0,mpi_comm_comp,irtn) - call mpi_bcast(latlast,1,MPI_INTEGER,0,mpi_comm_comp,irtn) - write(6,*) 'laststart,latlast,me A calling bcast=',latstart,latlast,me - call collect_loc(gdlon,dummy) - if(me == 0)then - lonstart = nint(dummy(1,1)*gdsdegr) - lonlast = nint(dummy(im,jm)*gdsdegr) - end if - call mpi_bcast(lonstart,1,MPI_INTEGER,0,mpi_comm_comp,irtn) - call mpi_bcast(lonlast, 1,MPI_INTEGER,0,mpi_comm_comp,irtn) - - write(6,*)'lonstart,lonlast A calling bcast=',lonstart,lonlast -! - -! generate look up table for lifted parcel calculations - - THL = 210. - PLQ = 70000. - pt_TBL = 10000. ! this is for 100 hPa added by Moorthi - - CALL TABLE(PTBL,TTBL,PT_TBL, & - RDQ,RDTH,RDP,RDTHE,PL,THL,QS0,SQS,STHE,THE0) - - CALL TABLEQ(TTBLQ,RDPQ,RDTHEQ,PLQ,THL,STHEQ,THE0Q) - -! -! - IF(ME == 0)THEN - WRITE(6,*)' SPL (POSTED PRESSURE LEVELS) BELOW: ' - WRITE(6,51) (SPL(L),L=1,LSM) - 50 FORMAT(14(F4.1,1X)) - 51 FORMAT(8(F8.1,1X)) - ENDIF -! -!$omp parallel do private(l) - DO L = 1,LSM - ALSL(L) = LOG(SPL(L)) - END DO -! -!HC WRITE IGDS OUT FOR WEIGHTMAKER TO READ IN AS KGDSIN - if(me == 0)then - print*,'writing out igds' - igdout = 110 -! open(igdout,file='griddef.out',form='unformatted' -! + ,status='unknown') - if(maptype == 1)THEN ! Lambert conformal - WRITE(igdout)3 - WRITE(6,*)'igd(1)=',3 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)TRUELAT2 - WRITE(igdout)TRUELAT1 - WRITE(igdout)255 - ELSE IF(MAPTYPE == 2)THEN !Polar stereographic - WRITE(igdout)5 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)TRUELAT2 !Assume projection at +-90 - WRITE(igdout)TRUELAT1 - WRITE(igdout)255 - ! Note: The calculation of the map scale factor at the standard - ! lat/lon and the PSMAPF - ! Get map factor at 60 degrees (N or S) for PS projection, which will - ! be needed to correctly define the DX and DY values in the GRIB GDS - if (TRUELAT1 < 0.) THEN - LAT = -60. - else - LAT = 60. - end if - - CALL MSFPS (LAT,TRUELAT1*0.001,PSMAPF) - - ELSE IF(MAPTYPE == 3) THEN !Mercator - WRITE(igdout)1 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)8 - WRITE(igdout)latlast - WRITE(igdout)lonlast - WRITE(igdout)TRUELAT1 - WRITE(igdout)0 - WRITE(igdout)64 - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)255 - ELSE IF(MAPTYPE == 0 .OR. MAPTYPE == 203)THEN !A STAGGERED E-GRID - WRITE(igdout)203 - WRITE(igdout)im - WRITE(igdout)jm - WRITE(igdout)LATSTART - WRITE(igdout)LONSTART - WRITE(igdout)136 - WRITE(igdout)CENLAT - WRITE(igdout)CENLON - WRITE(igdout)DXVAL - WRITE(igdout)DYVAL - WRITE(igdout)64 - WRITE(igdout)0 - WRITE(igdout)0 - WRITE(igdout)0 - END IF - end if -! -! 
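
The polar stereographic branch above rescales DX and DY through the map scale factor at plus or minus 60 degrees before they enter the GRIB GDS. MSFPS itself is not part of this change, so the routine below is only a sketch of the standard polar stereographic scale factor under a hypothetical name; the 0.001 factor in the call above suggests TRUELAT1 is stored in millidegrees, and that reading is an assumption.

      ! Sketch of the polar stereographic map scale factor at latitude lat
      ! for a grid true at truelat1, assuming the standard form
      ! m = (1 + sin(|truelat1|)) / (1 + sin(|lat|)).  Both inputs in degrees.
      subroutine msfps_sketch(lat, truelat1, psmapf)
        implicit none
        real, intent(in)  :: lat       ! latitude where the factor is needed (deg)
        real, intent(in)  :: truelat1  ! latitude where the grid is true (deg)
        real, intent(out) :: psmapf    ! map scale factor
        real, parameter   :: dtr = 3.14159265358979 / 180.0
        psmapf = (1.0 + sin(abs(truelat1)*dtr)) / (1.0 + sin(abs(lat)*dtr))
      end subroutine msfps_sketch
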
- - RETURN - END - - subroutine read_netcdf_3d_para(ncid,im,jm,ista,ista_2l,iend,iend_2u,jsta,jsta_2l,jend,jend_2u, & - spval,varname,buf,lm) - - use netcdf - use ctlblk_mod, only : me - use params_mod, only : small - implicit none - INCLUDE "mpif.h" - - character(len=20),intent(in) :: varname - real,intent(in) :: spval - integer,intent(in) :: ncid,im,jm,lm,jsta_2l,jend_2u,jsta,jend - integer,intent(in) :: ista_2l,iend_2u,ista,iend - real,intent(out) :: buf(ista_2l:iend_2u,jsta_2l:jend_2u,lm) - integer :: varid,iret,ii,jj,i,j,l,kk - integer :: start(3), count(3), stride(3) - real,parameter :: spval_netcdf=9.99e+20 - real :: fill_value - - iret = nf90_inq_varid(ncid,trim(varname),varid) - if (iret /= 0) then - if (me == 0) print*,VarName," not found -Assigned missing values" -!$omp parallel do private(i,j,l) - do l=1,lm - do j=jsta,jend - do i=ista,iend - buf(i,j,l)=spval - enddo - enddo - enddo - else - iret = nf90_get_att(ncid,varid,"_FillValue",fill_value) - if (iret /= 0) fill_value = spval_netcdf - start = (/ista,jsta,1/) - ii=iend-ista+1 - jj=jend-jsta+1 - count = (/ii,jj,lm/) - iret = nf90_get_var(ncid,varid,buf(ista:iend,jsta:jend,1:lm),start=start,count=count) - if (iret /= 0) then - print*," iret /=0, Error in reading varid " - endif - do l=1,lm - do j=jsta,jend - do i=ista,iend - if(abs(buf(i,j,l)-fill_value) con_g, fv => con_fvirt, rgas => con_rd, & @@ -80,7 +86,8 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) ardsw, asrfc, avrain, avcnvc, theat, gdsdegr, spl, lsm, alsl, im, jm, im_jm, lm, & jsta_2l, jend_2u, nsoil, lp1, icu_physics, ivegsrc, novegtype, nbin_ss, nbin_bc, & nbin_oc, nbin_su, gocart_on, pt_tbl, hyb_sigp, filenameFlux, fileNameAER, & - iSF_SURFACE_PHYSICS,rdaod, aqfcmaq_on + iSF_SURFACE_PHYSICS,rdaod, aqfcmaq_on, modelname, & + ista, iend, ista_2l, iend_2u,iend_m use gridspec_mod, only: maptype, gridtype, latstart, latlast, lonstart, lonlast, cenlon, & dxval, dyval, truelat2, truelat1, psmapf, cenlat,lonstartv, lonlastv, cenlonv, & latstartv, latlastv, cenlatv,latstart_r,latlast_r,lonstart_r,lonlast_r, STANDLON @@ -160,15 +167,14 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) real, allocatable :: wrk1(:,:), wrk2(:,:) real, allocatable :: p2d(:,:), t2d(:,:), q2d(:,:), & qs2d(:,:), cw2d(:,:), cfr2d(:,:) - real(kind=4),allocatable :: vcoord4(:,:,:) real, dimension(lm+1) :: ak5, bk5 real*8, allocatable :: pm2d(:,:), pi2d(:,:) real, allocatable :: tmp(:) - real :: buf(im,jsta_2l:jend_2u) - real :: buf3d(im,jsta_2l:jend_2u,lm) + real :: buf(ista_2l:iend_2u,jsta_2l:jend_2u) + real :: buf3d(ista_2l:iend_2u,jsta_2l:jend_2u,lm) -! real buf(im,jsta_2l:jend_2u),bufsoil(im,nsoil,jsta_2l:jend_2u) & -! ,buf3d(im,jsta_2l:jend_2u,lm),buf3d2(im,lp1,jsta_2l:jend_2u) +! real buf(ista_2l:iend_2u,jsta_2l:jend_2u),bufsoil(im,nsoil,jsta_2l:jend_2u) & +! 
,buf3d(ista_2l:iend_2u,jsta_2l:jend_2u,lm),buf3d2(im,lp1,jsta_2l:jend_2u) real LAT integer isa, jsa, latghf, jtem, idvc, idsl, nvcoord, ip1, nn, npass @@ -235,137 +241,137 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) if (aqfcmaq_on) then - allocate(aacd(im,jsta_2l:jend_2u,lm)) - allocate(aalj(im,jsta_2l:jend_2u,lm)) - allocate(aalk1j(im,jsta_2l:jend_2u,lm)) - allocate(aalk2j(im,jsta_2l:jend_2u,lm)) + allocate(aacd(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aalj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aalk1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aalk2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(abnz1j(im,jsta_2l:jend_2u,lm)) - allocate(abnz2j(im,jsta_2l:jend_2u,lm)) - allocate(abnz3j(im,jsta_2l:jend_2u,lm)) + allocate(abnz1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(abnz2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(abnz3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(acaj(im,jsta_2l:jend_2u,lm)) - allocate(acet(im,jsta_2l:jend_2u,lm)) + allocate(acaj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(acet(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(acli(im,jsta_2l:jend_2u,lm)) - allocate(aclj(im,jsta_2l:jend_2u,lm)) - allocate(aclk(im,jsta_2l:jend_2u,lm)) + allocate(acli(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aclj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aclk(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(acors(im,jsta_2l:jend_2u,lm)) - allocate(acro_primary(im,jsta_2l:jend_2u,lm)) - allocate(acrolein(im,jsta_2l:jend_2u,lm)) - allocate(aeci(im,jsta_2l:jend_2u,lm)) - allocate(aecj(im,jsta_2l:jend_2u,lm)) - allocate(afej(im,jsta_2l:jend_2u,lm)) - allocate(aglyj(im,jsta_2l:jend_2u,lm)) + allocate(acors(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(acro_primary(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(acrolein(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aeci(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aecj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(afej(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aglyj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(ah2oi(im,jsta_2l:jend_2u,lm)) - allocate(ah2oj(im,jsta_2l:jend_2u,lm)) - allocate(ah2ok(im,jsta_2l:jend_2u,lm)) + allocate(ah2oi(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ah2oj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ah2ok(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(ah3opi(im,jsta_2l:jend_2u,lm)) - allocate(ah3opj(im,jsta_2l:jend_2u,lm)) - allocate(ah3opk(im,jsta_2l:jend_2u,lm)) + allocate(ah3opi(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ah3opj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ah3opk(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aiso1j(im,jsta_2l:jend_2u,lm)) - allocate(aiso2j(im,jsta_2l:jend_2u,lm)) - allocate(aiso3j(im,jsta_2l:jend_2u,lm)) + allocate(aiso1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aiso2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aiso3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aivpo1j(im,jsta_2l:jend_2u,lm)) - allocate(akj(im,jsta_2l:jend_2u,lm)) + allocate(aivpo1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(akj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(ald2(im,jsta_2l:jend_2u,lm)) - allocate(ald2_primary(im,jsta_2l:jend_2u,lm)) + allocate(ald2(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ald2_primary(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aldx(im,jsta_2l:jend_2u,lm)) + allocate(aldx(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(alvoo1i(im,jsta_2l:jend_2u,lm)) - allocate(alvoo1j(im,jsta_2l:jend_2u,lm)) - allocate(alvoo2i(im,jsta_2l:jend_2u,lm)) - 
allocate(alvoo2j(im,jsta_2l:jend_2u,lm)) - allocate(alvpo1i(im,jsta_2l:jend_2u,lm)) - allocate(alvpo1j(im,jsta_2l:jend_2u,lm)) + allocate(alvoo1i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(alvoo1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(alvoo2i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(alvoo2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(alvpo1i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(alvpo1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(amgj(im,jsta_2l:jend_2u,lm)) - allocate(amnj(im,jsta_2l:jend_2u,lm)) + allocate(amgj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(amnj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(anai(im,jsta_2l:jend_2u,lm)) - allocate(anaj(im,jsta_2l:jend_2u,lm)) - allocate(anak(im,jsta_2l:jend_2u,lm)) + allocate(anai(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(anaj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(anak(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(anh4i(im,jsta_2l:jend_2u,lm)) - allocate(anh4j(im,jsta_2l:jend_2u,lm)) - allocate(anh4k(im,jsta_2l:jend_2u,lm)) + allocate(anh4i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(anh4j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(anh4k(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(ano3i(im,jsta_2l:jend_2u,lm)) - allocate(ano3j(im,jsta_2l:jend_2u,lm)) - allocate(ano3k(im,jsta_2l:jend_2u,lm)) + allocate(ano3i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ano3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(ano3k(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aolgaj(im,jsta_2l:jend_2u,lm)) - allocate(aolgbj(im,jsta_2l:jend_2u,lm)) + allocate(aolgaj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aolgbj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aomi(im,jsta_2l:jend_2u,lm)) - allocate(aomj(im,jsta_2l:jend_2u,lm)) + allocate(aomi(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aomj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aorgcj(im,jsta_2l:jend_2u,lm)) + allocate(aorgcj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aothri(im,jsta_2l:jend_2u,lm)) - allocate(aothrj(im,jsta_2l:jend_2u,lm)) + allocate(aothri(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aothrj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(apah1j(im,jsta_2l:jend_2u,lm)) - allocate(apah2j(im,jsta_2l:jend_2u,lm)) - allocate(apah3j(im,jsta_2l:jend_2u,lm)) + allocate(apah1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(apah2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(apah3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(apcsoj(im,jsta_2l:jend_2u,lm)) + allocate(apcsoj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(apomi(im,jsta_2l:jend_2u,lm)) - allocate(apomj(im,jsta_2l:jend_2u,lm)) + allocate(apomi(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(apomj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aseacat(im,jsta_2l:jend_2u,lm)) - allocate(asij(im,jsta_2l:jend_2u,lm)) + allocate(aseacat(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asij(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(aso4i(im,jsta_2l:jend_2u,lm)) - allocate(aso4j(im,jsta_2l:jend_2u,lm)) - allocate(aso4k(im,jsta_2l:jend_2u,lm)) - allocate(asoil(im,jsta_2l:jend_2u,lm)) + allocate(aso4i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aso4j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(aso4k(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asoil(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(asomi(im,jsta_2l:jend_2u,lm)) - allocate(asomj(im,jsta_2l:jend_2u,lm)) + allocate(asomi(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asomj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(asqtj(im,jsta_2l:jend_2u,lm)) + 
allocate(asqtj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(asvoo1i(im,jsta_2l:jend_2u,lm)) - allocate(asvoo1j(im,jsta_2l:jend_2u,lm)) - allocate(asvoo2i(im,jsta_2l:jend_2u,lm)) - allocate(asvoo2j(im,jsta_2l:jend_2u,lm)) - allocate(asvoo3j(im,jsta_2l:jend_2u,lm)) + allocate(asvoo1i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvoo1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvoo2i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvoo2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvoo3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(asvpo1i(im,jsta_2l:jend_2u,lm)) - allocate(asvpo1j(im,jsta_2l:jend_2u,lm)) - allocate(asvpo2i(im,jsta_2l:jend_2u,lm)) - allocate(asvpo2j(im,jsta_2l:jend_2u,lm)) - allocate(asvpo3j(im,jsta_2l:jend_2u,lm)) + allocate(asvpo1i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvpo1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvpo2i(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvpo2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(asvpo3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(atij(im,jsta_2l:jend_2u,lm)) + allocate(atij(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(atol1j(im,jsta_2l:jend_2u,lm)) - allocate(atol2j(im,jsta_2l:jend_2u,lm)) - allocate(atol3j(im,jsta_2l:jend_2u,lm)) + allocate(atol1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(atol2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(atol3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(atoti(im,jsta_2l:jend_2u,lm)) - allocate(atotj(im,jsta_2l:jend_2u,lm)) - allocate(atotk(im,jsta_2l:jend_2u,lm)) + allocate(atoti(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(atotj(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(atotk(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(atrp1j(im,jsta_2l:jend_2u,lm)) - allocate(atrp2j(im,jsta_2l:jend_2u,lm)) + allocate(atrp1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(atrp2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(axyl1j(im,jsta_2l:jend_2u,lm)) - allocate(axyl2j(im,jsta_2l:jend_2u,lm)) - allocate(axyl3j(im,jsta_2l:jend_2u,lm)) + allocate(axyl1j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(axyl2j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(axyl3j(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) - allocate(pm25ac(im,jsta_2l:jend_2u,lm)) - allocate(pm25at(im,jsta_2l:jend_2u,lm)) - allocate(pm25co(im,jsta_2l:jend_2u,lm)) + allocate(pm25ac(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(pm25at(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) + allocate(pm25co(ista_2l:iend_2u,jsta_2l:jend_2u,lm)) endif @@ -375,14 +381,17 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) WRITE(6,*)'INITPOST: ENTER INITPOST_NETCDF' WRITE(6,*)'me=',me, & 'jsta_2l=',jsta_2l,'jend_2u=', & - jend_2u,'im=',im + jend_2u,'im=',im, & + 'ista_2l=',ista_2l,'iend_2u=',iend_2u, & + 'ista=',ista,'iend=',iend, & + 'iend_m=',iend_m ! - isa = im / 2 + isa = (ista+iend) / 2 jsa = (jsta+jend) / 2 !$omp parallel do private(i,j) do j = jsta_2l, jend_2u - do i=1,im + do i= ista_2l, iend_2u buf(i,j) = spval enddo enddo @@ -617,9 +626,13 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) end if STANDLON = cenlon - print*,'lonstart,latstart,cenlon,cenlat,truelat1,truelat2,stadlon,dyval,dxval', & + print*,'lonstart,latstart,cenlon,cenlat,truelat1,truelat2, & + stadlon,dyval,dxval', & lonstart,latstart,cenlon,cenlat,truelat1,truelat2,standlon,dyval,dxval + else if(trim(varcharval)=='gaussian')then + MAPTYPE=4 + idrt=4 else ! 
setting default maptype MAPTYPE=0 idrt=0 @@ -636,7 +649,7 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) !$omp parallel do private(i,j) do j = jsta_2l, jend_2u - do i = 1, im + do i = ista_2l, iend_2u LMV(i,j) = lm LMH(i,j) = lm end do @@ -647,7 +660,7 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) !$omp parallel do private(i,j,l) do l = 1, lm do j = jsta_2l, jend_2u - do i = 1, im + do i = ista_2l, iend_2u HTM (i,j,l) = 1.0 VTM (i,j,l) = 1.0 end do @@ -677,7 +690,6 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) ! write(0,*)'nrec=',nrec !allocate(recname(nrec),reclevtyp(nrec),reclev(nrec)) allocate(glat1d(jm),glon1d(im)) - allocate(vcoord4(lm+1,3,2)) ! hardwire idate for now ! idate=(/2017,08,07,00,0,0,0,0/) @@ -712,7 +724,7 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) ! Jili Dong check output format for coordinate reading Status=nf90_inq_varid(ncid3d,'grid_xt',varid) Status=nf90_inquire_variable(ncid3d,varid,ndims = numDims) - if(numDims==1) then + if(numDims==1.and.modelname=="FV3R") then read_lonlat=.true. else read_lonlat=.false. @@ -733,7 +745,7 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) if(numDims==1)then Status=nf90_get_var(ncid3d,varid,glon1d) do j=jsta,jend - do i=1,im + do i=ista,iend gdlon(i,j) = real(glon1d(i),kind=4) end do end do @@ -756,13 +768,13 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) if(maxval(abs(dummy))<2.0*pi)convert_rad_to_deg=.true. if(convert_rad_to_deg)then do j=jsta,jend - do i=1,im + do i=ista,iend gdlon(i,j) = real(dummy(i,j),kind=4)*180./pi end do end do else do j=jsta,jend - do i=1,im + do i=ista,iend gdlon(i,j) = real(dummy(i,j),kind=4) end do end do @@ -802,7 +814,7 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) if(numDims==1)then Status=nf90_get_var(ncid3d,varid,glat1d) do j=jsta,jend - do i=1,im + do i=ista,iend gdlat(i,j) = real(glat1d(j),kind=4) end do end do @@ -813,13 +825,13 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) if(maxval(abs(dummy))1.0E6 .or. pint(1,jsta_2l,lp1)<50000.) & + do i=ista,iend +! if(pint(i,j,lp1)>1.0E6 .or. pint(ista_2l,jsta_2l,lp1)<50000.) & ! print*,'bad psfc ',i,j,pint(i,j,lp1) end do end do @@ -1577,14 +1596,14 @@ SUBROUTINE INITPOST_NETCDF(ncid2d,ncid3d) pt = ak5(1) do j=jsta,jend - do i=1,im + do i=ista,iend pint(i,j,1)= pt end do end do do l=2,lp1 do j=jsta,jend - do i=1,im + do i=ista,iend if (dpres(i,j,l-1) @file -! -!> SUBPROGRAM: UPP_PHYSICS -!! @author JMENG @date 2020-05-20 -!! -!! A collection of UPP subroutines for physics variables calculation. -!! -!! CALCAPE -!! Compute CAPE/CINS and other storm related variables. -!! -!! CALCAPE2 -!! Compute additional storm related variables. -!! -!! CALRH -!! CALRH_NAM -!! CALRH_GFS -!! CALRH_GSD -!! Compute RH using various algorithms. -!! The NAM v4.1.18 ALGORITHM (CALRH_NAM) is selected as default for -!! NMMB and FV3GFS, FV3GEFS, and FV3R for the UPP 2020 unification. -!! -!! CALRH_PW -!! Algorithm use at GSD for RUC and Rapid Refresh -!! -!! FPVSNEW -!! Compute saturation vapor pressure. -!! -!! TVIRTUAL -!! Compute virtual temperature. -!! -!! PROGRAM HISTORY LOG: -!! MAY, 2020 Jesse Meng Initial code -!!------------------------------------------------------------------------------------- -!! +!> +!> @brief upp_physics is a collection of UPP subroutines for physics variables calculation. +!> @author Jesse Meng @date 2020-05-20 + +!> calcape() computes CAPE/CINS and other storm related variables. +!> +!> calcape2() computes additional storm related variables. +!> +!> calrh(), calrh_nam(), calrh_gfs(), calrh_gsd() compute RH using various algorithms. 
+!> +!> The NAM v4.1.18 algorithm (calrh_nam()) is selected as default for +!> NMMB and FV3GFS, FV3GEFS, and FV3R for the UPP 2020 unification. +!> +!> calrh_pw() algorithm use at GSD for RUC and Rapid Refresh. +!> +!> fpvsnew() computes saturation vapor pressure. +!> +!> tvirtual() computes virtual temperature. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2020-05-20 | Jesse Meng | Initial +!> +!> @author Jesse Meng @date 2020-05-20 module upp_physics implicit none @@ -72,55 +64,35 @@ END SUBROUTINE CALRH ! !------------------------------------------------------------------------------------- ! - SUBROUTINE CALRH_NAM(P1,T1,Q1,RH) -! SUBROUTINE CALRH(P1,T1,Q1,RH) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALRH COMPUTES RELATIVE HUMIDITY -! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-22 -! -! ABSTRACT: -! THIS ROUTINE COMPUTES RELATIVE HUMIDITY GIVEN PRESSURE, -! TEMPERATURE, SPECIFIC HUMIDITY. AN UPPER AND LOWER BOUND -! OF 100 AND 1 PERCENT RELATIVE HUMIDITY IS ENFORCED. WHEN -! THESE BOUNDS ARE APPLIED THE PASSED SPECIFIC HUMIDITY -! ARRAY IS ADJUSTED AS NECESSARY TO PRODUCE THE SET RELATIVE -! HUMIDITY. -! . -! -! PROGRAM HISTORY LOG: -! ??-??-?? DENNIS DEAVEN -! 92-12-22 RUSS TREADON - MODIFIED AS DESCRIBED ABOVE. -! 98-06-08 T BLACK - CONVERSION FROM 1-D TO 2-D -! 98-08-18 MIKE BALDWIN - MODIFY TO COMPUTE RH OVER ICE AS IN MODEL -! 98-12-16 GEOFF MANIKIN - UNDO RH COMPUTATION OVER ICE -! 00-01-04 JIM TUCCILLO - MPI VERSION -! 02-06-11 MIKE BALDWIN - WRF VERSION -! 06-03-19 Wen Meng - MODIFY TOP PRESSURE to 1 PA -! -! USAGE: CALL CALRH(P1,T1,Q1,RH) -! INPUT ARGUMENT LIST: -! P1 - PRESSURE (PA) -! T1 - TEMPERATURE (K) -! Q1 - SPECIFIC HUMIDITY (KG/KG) -! -! OUTPUT ARGUMENT LIST: -! RH - RELATIVE HUMIDITY (DECIMAL FORM) -! Q1 - ADJUSTED SPECIFIC HUMIDITY (KG/KG) -! -! OUTPUT FILES: -! NONE -! -! SUBPROGRAMS CALLED: -! UTILITIES: -! LIBRARY: -! NONE -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN -! MACHINE : CRAY C-90 -!$$$ -! +!> calrh_nam() computes relative humidity. +!> +!> This routine computes relative humidity given pressure, +!> temperature, specific humidity. an upper and lower bound +!> of 100 and 1 percent relative humidity is enforced. When +!> these bounds are applied the passed specific humidity +!> array is adjusted as necessary to produce the set relative +!> humidity. +!> +!> @param[in] P1 Pressure (pa) +!> @param[in] T1 Temperature (K) +!> @param[in] Q1 Specific humidity (kg/kg) +!> @param[out] RH Relative humidity (decimal form) +!> @param[out] Q1 Specific humidity (kg/kg) +!> +!> ### Program History Log +!> Date | Programmer | Comments +!> -----|------------|--------- +!> ????-??-?? | DENNIS DEAVEN | Initial +!> 1992-12-22 | Russ Treadon | Modified as described above +!> 1998-06-08 | T Black | Conversion from 1-D to 2-D +!> 1998-08-18 | Mike Baldwin | Modify to compute RH over ice as in model +!> 1998-12-16 | Geoff Manikin | undo RH computation over ice +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-06-11 | Mike Baldwin | WRF Version +!> 2006-03-19 | Wen Meng | Modify top pressure to 1 pa +!> +!> @author Russ Treadon W/NP2 @date 1992-12-22 + SUBROUTINE CALRH_NAM(P1,T1,Q1,RH) use params_mod, only: PQ0, a2, a3, a4, rhmin use ctlblk_mod, only: ista, iend, jsta, jend, spval !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -171,55 +143,37 @@ END SUBROUTINE CALRH_NAM ! !------------------------------------------------------------------------------------- ! 
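
The description above fixes the behavior at the bounds but not the saturation value itself. The sketch below fills in one plausible reading for a single point: QC is a Magnus-type saturation specific humidity built from the PQ0, A2, A3, A4 constants that CALRH_NAM imports from params_mod, RH is clipped to the stated bounds, and Q1 is adjusted so the pair stays consistent. The constant values and the exact lower bound are assumptions, not a copy of the library routine.

      subroutine rh_bounded(p1, t1, q1, rh)
        implicit none
        ! Assumed Magnus-type constants standing in for the params_mod values.
        real, parameter :: pq0 = 379.90516, a2 = 17.2693882, a3 = 273.16, a4 = 35.86
        real, parameter :: rhmin = 0.01          ! 1 percent lower bound described above
        real, intent(in)    :: p1, t1            ! pressure (Pa), temperature (K)
        real, intent(inout) :: q1                ! specific humidity (kg/kg)
        real, intent(out)   :: rh                ! relative humidity (decimal form)
        real :: qc
        qc = pq0 / p1 * exp(a2 * (t1 - a3) / (t1 - a4))   ! saturation specific humidity
        rh = q1 / qc
        if (rh > 1.0) then                       ! cap at 100 percent and adjust q1
          rh = 1.0
          q1 = rh * qc
        else if (rh < rhmin) then                ! floor at the lower bound and adjust q1
          rh = rhmin
          q1 = rh * qc
        end if
      end subroutine rh_bounded
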
+!> calrh_gfs() computes relative humidity. +!> +!> This routine computes relative humidity given pressure, +!> temperature, specific humidity. an upper and lower bound +!> of 100 and 1 percent relative humidity is enforced. When +!> these bounds are applied the passed specific humidity +!> array is adjusted as necessary to produce the set relative +!> humidity. +!> +!> @param[in] P1 Pressure (pa) +!> @param[in] T1 Temperature (K) +!> @param[in] Q1 Specific humidity (kg/kg) +!> @param[out] RH Relative humidity (decimal form) +!> @param[out] Q1 Specific humidity (kg/kg) +!> +!> ### Program History Log +!> Date | Programmer | Comments +!> -----|------------|--------- +!> ????-??-?? | DENNIS DEAVEN | Initial +!> 1992-12-22 | Russ Treadon | Modified as described above +!> 1998-06-08 | T Black | Conversion from 1-D to 2-D +!> 1998-08-18 | Mike Baldwin | Modify to compute RH over ice as in model +!> 1998-12-16 | Geoff Manikin | undo RH computation over ice +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-06-11 | Mike Baldwin | WRF Version +!> 2013-08-13 | S. Moorthi | Threading +!> 2006-03-19 | Wen Meng | Modify top pressure to 1 pa +!> +!> @author Russ Treadon W/NP2 @date 1992-12-22 + SUBROUTINE CALRH_GFS(P1,T1,Q1,RH) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALRH COMPUTES RELATIVE HUMIDITY -! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-22 -! -! ABSTRACT: -! THIS ROUTINE COMPUTES RELATIVE HUMIDITY GIVEN PRESSURE, -! TEMPERATURE, SPECIFIC HUMIDITY. AN UPPER AND LOWER BOUND -! OF 100 AND 1 PERCENT RELATIVE HUMIDITY IS ENFORCED. WHEN -! THESE BOUNDS ARE APPLIED THE PASSED SPECIFIC HUMIDITY -! ARRAY IS ADJUSTED AS NECESSARY TO PRODUCE THE SET RELATIVE -! HUMIDITY. -! . -! -! PROGRAM HISTORY LOG: -! ??-??-?? DENNIS DEAVEN -! 92-12-22 RUSS TREADON - MODIFIED AS DESCRIBED ABOVE. -! 98-06-08 T BLACK - CONVERSION FROM 1-D TO 2-D -! 98-08-18 MIKE BALDWIN - MODIFY TO COMPUTE RH OVER ICE AS IN MODEL -! 98-12-16 GEOFF MANIKIN - UNDO RH COMPUTATION OVER ICE -! 00-01-04 JIM TUCCILLO - MPI VERSION -! 02-06-11 MIKE BALDWIN - WRF VERSION -! 13-08-13 S. Moorthi - Threading -! 06-03-19 Wen Meng - MODIFY TOP PRESSURE to 1 PA -! -! USAGE: CALL CALRH(P1,T1,Q1,RH) -! INPUT ARGUMENT LIST: -! P1 - PRESSURE (PA) -! T1 - TEMPERATURE (K) -! Q1 - SPECIFIC HUMIDITY (KG/KG) -! -! OUTPUT ARGUMENT LIST: -! RH - RELATIVE HUMIDITY (DECIMAL FORM) -! Q1 - ADJUSTED SPECIFIC HUMIDITY (KG/KG) -! -! OUTPUT FILES: -! NONE -! -! SUBPROGRAMS CALLED: -! UTILITIES: -! LIBRARY: -! NONE -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN -! MACHINE : CRAY C-90 -!$$$ -! use params_mod, only: rhmin use ctlblk_mod, only: ista, iend, jsta, jend, spval !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -390,37 +344,28 @@ END SUBROUTINE CALRH_PW !------------------------------------------------------------------------------------- ! elemental function fpvsnew(t) -!$$$ Subprogram Documentation Block -! -! Subprogram: fpvsnew Compute saturation vapor pressure -! Author: N Phillips w/NMC2X2 Date: 30 dec 82 -! -! Abstract: Compute saturation vapor pressure from the temperature. -! A linear interpolation is done between values in a lookup table -! computed in gpvs. See documentation for fpvsx for details. -! Input values outside table range are reset to table extrema. -! The interpolation accuracy is almost 6 decimal places. -! On the Cray, fpvs is about 4 times faster than exact calculation. -! This function should be expanded inline in the calling routine. -! -! Program History Log: -! 
91-05-07 Iredell made into inlinable function -! 94-12-30 Iredell expand table -! 1999-03-01 Iredell f90 module -! 2001-02-26 Iredell ice phase -! -! Usage: pvs=fpvsnew(t) -! -! Input argument list: -! t Real(krealfp) temperature in Kelvin -! -! Output argument list: -! fpvsnew Real(krealfp) saturation vapor pressure in Pascals -! -! Attributes: -! Language: Fortran 90. -! -!$$$ +!> fpvsnew() computes saturation vapor pressure. +!> +!> Compute saturation vapor pressure from the temperature. +!> A linear interpolation is done between values in a lookup table +!> computed in gpvs. See documentation for fpvsx for details. +!> Input values outside table range are reset to table extrema. +!> The interpolation accuracy is almost 6 decimal places. +!> On the Cray, fpvs is about 4 times faster than exact calculation. +!> This function should be expanded inline in the calling routine. +!> +!> @param[in] t Real(krealfp) Temperature in Kelvin. +!> @param[out] fpvsnew Real(krealfp) Saturation vapor pressure in Pascals. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1991-05-07 | Iredell | Initial. Made into inlinable function +!> 1994-12-30 | Iredell | Expand table +!> 1999-03-01 | Iredell | F90 module +!> 2001-02-26 | Iredell | Ice phase +!> +!> @author N Phillips w/NMC2X2 @date 1982-12-30 implicit none integer,parameter:: nxpvs=7501 real,parameter:: con_ttp =2.7316e+2 ! temp at H2O 3pt @@ -490,130 +435,98 @@ elemental function fpvsnew(t) end function fpvsnew ! !------------------------------------------------------------------------------------- -! - +!> calcape() computes CAPE and CINS. +!> +!> This routine computes CAPE and CINS given temperature, +!> pressure, and specific humidty. In "storm and cloud +!> dynamics" (1989, academic press) cotton and anthes define +!> CAPE (equation 9.16, p501) as +!> +!> @code +!> EL +!> CAPE = SUM G * LN(THETAP/THETAA) DZ +!> LCL +!> +!> Where, +!> EL = Equilibrium level, +!> LCL = Lifting condenstation level, +!> G = Gravitational acceleration, +!> THETAP = Lifted parcel potential temperature, +!> THETAA = Ambient potential temperature. +!> @endcode +!> +!> Note that the integrand ln(THETAP/THETAA) approximately +!> equals (THETAP-THETAA)/THETAA. This ratio is often used +!> in the definition of CAPE/CINS. +!> +!> Two types of CAPE/CINS can be computed by this routine. The +!> summation process is the same For both cases. What differs +!> is the definition of the parcel to lift. FOR ITYPE=1 the +!> parcel with the warmest THETA-E in A DPBND pascal layer above +!> the model surface is lifted. the arrays P1D, T1D, and Q1D +!> are not used. For itype=2 the arrays P1D, T1D, and Q1D +!> define the parcel to lift in each column. Both types of +!> CAPE/CINS may be computed in a single execution of the post +!> processor. +!> +!> This algorithm proceeds as follows. +!> For each column, +!> (1) Initialize running CAPE and CINS SUM TO 0.0 +!> (2) Compute temperature and pressure at the LCL using +!> look up table (PTBL). Use either parcel that gives +!> max THETAE in lowest DPBND above ground (ITYPE=1) +!> or given parcel from t1D,Q1D,...(ITYPE=2). +!> (3) Compute the temp of a parcel lifted from the LCL. +!> We know that the parcel's +!> equivalent potential temperature (THESP) remains +!> constant through this process. we can +!> compute tpar using this knowledge using look +!> up table (subroutine TTBLEX). +!> (4) Find the equilibrium level. This is defined as the +!> highest positively buoyant layer. 
+!> (If there is no positively buoyant layer, CAPE/CINS +!> will be zero) +!> (5) Compute CAPE/CINS. +!> (A) Compute THETAP. We know TPAR and P. +!> (B) Compute THETAA. We know T and P. +!> (6) Add G*(THETAP-THETAA)*DZ to the running CAPE or CINS sum. +!> (A) If THETAP > THETAA, add to the CAPE sum. +!> (B) If THETAP < THETAA, add to the CINS sum. +!> (7) Are we at equilibrium level? +!> (A) If yes, stop the summation. +!> (b) if no, contiunue the summation. +!> (8) Enforce limits on CAPE and CINS (i.e. no negative CAPE) +!> +!> @param[in] ITYPE INTEGER Flag specifying how parcel to lift is identified. See comments above. +!> @param[in] DPBND Depth over which one searches for most unstable parcel. +!> @param[in] P1D Array of pressure of parcels to lift. +!> @param[in] T1D Array of temperature of parcels to lift. +!> @param[in] Q1D Array of specific humidity of parcels to lift. +!> @param[in] L1D Array of model level of parcels to lift. +!> @param[out] CAPE Convective available potential energy (J/kg). +!> @param[out] CINS Convective inhibition (J/kg). +!> @param[out] PPARC Pressure level of parcel lifted when one searches over a particular depth to compute CAPE/CIN. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-02-10 | Russ Treadon | Initial +!> 1993-06-19 | Russ Treadon | Generalized routine to allow for type 2 CAPE/CINS calculations +!> 1994-09-23 | Mike Baldwin | Modified to use look up tables instead of complicated equations +!> 1994-10-13 | Mike Baldwin | Modified to continue CAPE/CINS calc up to at highest buoyant layer +!> 1998-06-12 | T Black | Conversion from 1-D TO 2-D +!> 1998-08-18 | T Black | Compute APE internally +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-01-15 | Mike Baldwin | WRF Version +!> 2003-08-24 | G Manikin | Added level of parcel being lifted as output from the routine and added the depth over which one searches for the most unstable parcel as input +!> 2010-09-09 | G Manikin | Changed computation to use virtual temp added eq lvl hght and thunder parameter +!> 2015-??-?? | S Moorthi | Optimization and threading +!> 2021-07-28 | W Meng | Restrict computation from undefined grids +!> 2021-09-01 | E Colon | Equivalent level height index for RTMA +!> +!> @author Russ Treadon W/NP2 @date 1993-02-10 SUBROUTINE CALCAPE(ITYPE,DPBND,P1D,T1D,Q1D,L1D,CAPE, & CINS,PPARC,ZEQL,THUND) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALCAPE COMPUTES CAPE AND CINS -! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-02-10 -! -! ABSTRACT: -! -! THIS ROUTINE COMPUTES CAPE AND CINS GIVEN TEMPERATURE, -! PRESSURE, AND SPECIFIC HUMIDTY. IN "STORM AND CLOUD -! DYNAMICS" (1989, ACADEMIC PRESS) COTTON AND ANTHES DEFINE -! CAPE (EQUATION 9.16, P501) AS -! -! EL -! CAPE = SUM G * LN(THETAP/THETAA) DZ -! LCL -! -! WHERE, -! EL = EQUILIBRIUM LEVEL, -! LCL = LIFTING CONDENSTATION LEVEL, -! G = GRAVITATIONAL ACCELERATION, -! THETAP = LIFTED PARCEL POTENTIAL TEMPERATURE, -! THETAA = AMBIENT POTENTIAL TEMPERATURE. -! -! NOTE THAT THE INTEGRAND LN(THETAP/THETAA) APPROXIMATELY -! EQUALS (THETAP-THETAA)/THETAA. THIS RATIO IS OFTEN USED -! IN THE DEFINITION OF CAPE/CINS. -! -! TWO TYPES OF CAPE/CINS CAN BE COMPUTED BY THIS ROUTINE. THE -! SUMMATION PROCESS IS THE SAME FOR BOTH CASES. WHAT DIFFERS -! IS THE DEFINITION OF THE PARCEL TO LIFT. FOR ITYPE=1 THE -! PARCEL WITH THE WARMEST THETA-E IN A DPBND PASCAL LAYER ABOVE -! THE MODEL SURFACE IS LIFTED. THE ARRAYS P1D, T1D, AND Q1D -! ARE NOT USED. 
FOR ITYPE=2 THE ARRAYS P1D, T1D, AND Q1D -! DEFINE THE PARCEL TO LIFT IN EACH COLUMN. BOTH TYPES OF -! CAPE/CINS MAY BE COMPUTED IN A SINGLE EXECUTION OF THE POST -! PROCESSOR. -! -! THIS ALGORITHM PROCEEDS AS FOLLOWS. -! FOR EACH COLUMN, -! (1) INITIALIZE RUNNING CAPE AND CINS SUM TO 0.0 -! (2) COMPUTE TEMPERATURE AND PRESSURE AT THE LCL USING -! LOOK UP TABLE (PTBL). USE EITHER PARCEL THAT GIVES -! MAX THETAE IN LOWEST DPBND ABOVE GROUND (ITYPE=1) -! OR GIVEN PARCEL FROM T1D,Q1D,...(ITYPE=2). -! (3) COMPUTE THE TEMP OF A PARCEL LIFTED FROM THE LCL. -! WE KNOW THAT THE PARCEL'S -! EQUIVALENT POTENTIAL TEMPERATURE (THESP) REMAINS -! CONSTANT THROUGH THIS PROCESS. WE CAN -! COMPUTE TPAR USING THIS KNOWLEDGE USING LOOK -! UP TABLE (SUBROUTINE TTBLEX). -! (4) FIND THE EQUILIBRIUM LEVEL. THIS IS DEFINED AS THE -! HIGHEST POSITIVELY BUOYANT LAYER. -! (IF THERE IS NO POSITIVELY BUOYANT LAYER, CAPE/CINS -! WILL BE ZERO) -! (5) COMPUTE CAPE/CINS. -! (A) COMPUTE THETAP. WE KNOW TPAR AND P. -! (B) COMPUTE THETAA. WE KNOW T AND P. -! (6) ADD G*(THETAP-THETAA)*DZ TO THE RUNNING CAPE OR CINS SUM. -! (A) IF THETAP > THETAA, ADD TO THE CAPE SUM. -! (B) IF THETAP < THETAA, ADD TO THE CINS SUM. -! (7) ARE WE AT EQUILIBRIUM LEVEL? -! (A) IF YES, STOP THE SUMMATION. -! (B) IF NO, CONTIUNUE THE SUMMATION. -! (8) ENFORCE LIMITS ON CAPE AND CINS (I.E. NO NEGATIVE CAPE) -! -! PROGRAM HISTORY LOG: -! 93-02-10 RUSS TREADON -! 93-06-19 RUSS TREADON - GENERALIZED ROUTINE TO ALLOW FOR -! TYPE 2 CAPE/CINS CALCULATIONS. -! 94-09-23 MIKE BALDWIN - MODIFIED TO USE LOOK UP TABLES -! INSTEAD OF COMPLICATED EQUATIONS. -! 94-10-13 MIKE BALDWIN - MODIFIED TO CONTINUE CAPE/CINS CALC -! UP TO AT HIGHEST BUOYANT LAYER. -! 98-06-12 T BLACK - CONVERSION FROM 1-D TO 2-D -! 98-08-18 T BLACK - COMPUTE APE INTERNALLY -! 00-01-04 JIM TUCCILLO - MPI VERSION -! 02-01-15 MIKE BALDWIN - WRF VERSION -! 03-08-24 G MANIKIN - ADDED LEVEL OF PARCEL BEING LIFTED -! AS OUTPUT FROM THE ROUTINE AND ADDED -! THE DEPTH OVER WHICH ONE SEARCHES FOR -! THE MOST UNSTABLE PARCEL AS INPUT -! 10-09-09 G MANIKIN - CHANGED COMPUTATION TO USE VIRTUAL TEMP -! - ADDED EQ LVL HGHT AND THUNDER PARAMETER -! 15-xx-xx S MOORTHI - optimization and threading -! 21-07-28 W Meng - Restrict computation from undefined grids. -! 21-09-01 E COLON - equivalent level height index for RTMA -! -! USAGE: CALL CALCAPE(ITYPE,DPBND,P1D,T1D,Q1D,L1D,CAPE, -! CINS,PPARC) -! INPUT ARGUMENT LIST: -! ITYPE - INTEGER FLAG SPECIFYING HOW PARCEL TO LIFT IS -! IDENTIFIED. SEE COMMENTS ABOVE. -! DPBND - DEPTH OVER WHICH ONE SEARCHES FOR MOST UNSTABLE PARCEL -! P1D - ARRAY OF PRESSURE OF PARCELS TO LIFT. -! T1D - ARRAY OF TEMPERATURE OF PARCELS TO LIFT. -! Q1D - ARRAY OF SPECIFIC HUMIDITY OF PARCELS TO LIFT. -! L1D - ARRAY OF MODEL LEVEL OF PARCELS TO LIFT. -! -! OUTPUT ARGUMENT LIST: -! CAPE - CONVECTIVE AVAILABLE POTENTIAL ENERGY (J/KG) -! CINS - CONVECTIVE INHIBITION (J/KG) -! PPARC - PRESSURE LEVEL OF PARCEL LIFTED WHEN ONE SEARCHES -! OVER A PARTICULAR DEPTH TO COMPUTE CAPE/CIN -! -! OUTPUT FILES: -! STDOUT - RUN TIME STANDARD OUT. -! -! SUBPROGRAMS CALLED: -! UTILITIES: -! BOUND - BOUND (CLIP) DATA BETWEEN UPPER AND LOWER LIMTS. -! TTBLEX - LOOKUP TABLE ROUTINE TO GET T FROM THETAE AND P -! -! LIBRARY: -! COMMON - -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN 90 -! MACHINE : CRAY C-90 -!$$$ -! use vrbls3d, only: pmid, t, q, zint use vrbls2d, only: teql,ieql use masks, only: lmh @@ -992,141 +905,106 @@ SUBROUTINE CALCAPE(ITYPE,DPBND,P1D,T1D,Q1D,L1D,CAPE, & ! END SUBROUTINE CALCAPE ! 
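
The eight steps listed in the new CALCAPE documentation reduce, for one column, to a signed buoyancy sum between the LCL and the equilibrium level. The routine below is a single-column sketch of that summation only, using the ln(THETAP/THETAA) integrand from the quoted definition; level ordering, layer thicknesses, and the parcel and ambient theta profiles are assumed to be supplied by the caller, and the names are illustrative.

      subroutine sum_cape_cins(lcl, eql, thetap, thetaa, dz, cape, cins)
        implicit none
        ! Levels are assumed indexed from the model top, so the equilibrium
        ! level eql has a smaller index than the LCL index lcl.
        integer, intent(in)  :: lcl, eql
        real,    intent(in)  :: thetap(:), thetaa(:), dz(:)
        real,    intent(out) :: cape, cins
        real, parameter :: g = 9.81
        integer :: l
        cape = 0.0
        cins = 0.0
        do l = eql, lcl
          if (thetap(l) > thetaa(l)) then
            cape = cape + g * log(thetap(l)/thetaa(l)) * dz(l)   ! positively buoyant layer
          else
            cins = cins + g * log(thetap(l)/thetaa(l)) * dz(l)   ! accumulates as a non-positive number
          end if
        end do
        cape = max(cape, 0.0)                                    ! no negative CAPE
      end subroutine sum_cape_cins
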
-!------------------------------------------------------------------------------------- ! +!------------------------------------------------------------------------------------- +!> calcape2() computes CAPE and CINS. +!> +!> This routine computes CAPE and CINS given temperature, +!> pressure, and specific humidty. In "storm and cloud +!> dynamics" (1989, academic press) cotton and anthes define +!> CAPE (equation 9.16, p501) as +!> +!> @code +!> EL +!> CAPE = SUM G * ln(THETAP/THETAA) DZ +!> LCL +!> +!> Where, +!> EL = Equilibrium level, +!> LCL = Lifting condenstation level, +!> G = Gravitational acceleration, +!> THETAP = Lifted parcel potential temperature, +!> THETAA = Ambient potential temperature. +!> @endcode +!> +!> Note that the integrand ln(THETAP/THETAA) approximately +!> equals (THETAP-THETAA)/THETAA. This ratio is often used +!> in the definition of CAPE/CINS. +!> +!> Two types of CAPE/CINS can be computed by this routine. The +!> summation process is the same For both cases. What differs +!> is the definition of the parcel to lift. FOR ITYPE=1 the +!> parcel with the warmest THETA-E in A DPBND pascal layer above +!> the model surface is lifted. the arrays P1D, T1D, and Q1D +!> are not used. For itype=2 the arrays P1D, T1D, and Q1D +!> define the parcel to lift in each column. Both types of +!> CAPE/CINS may be computed in a single execution of the post +!> processor. +!> +!> This algorithm proceeds as follows. +!> For each column, +!> (1) Initialize running CAPE and CINS SUM TO 0.0 +!> (2) Compute temperature and pressure at the LCL using +!> look up table (PTBL). Use either parcel that gives +!> max THETAE in lowest DPBND above ground (ITYPE=1) +!> or given parcel from t1D,Q1D,...(ITYPE=2). +!> (3) Compute the temp of a parcel lifted from the LCL. +!> We know that the parcel's +!> equivalent potential temperature (THESP) remains +!> constant through this process. we can +!> compute tpar using this knowledge using look +!> up table (subroutine TTBLEX). +!> (4) Find the equilibrium level. This is defined as the +!> highest positively buoyant layer. +!> (If there is no positively buoyant layer, CAPE/CINS +!> will be zero) +!> (5) Compute CAPE/CINS. +!> (A) Compute THETAP. We know TPAR and P. +!> (B) Compute THETAA. We know T and P. +!> (6) Add G*(THETAP-THETAA)*DZ to the running CAPE or CINS sum. +!> (A) If THETAP > THETAA, add to the CAPE sum. +!> (B) If THETAP < THETAA, add to the CINS sum. +!> (7) Are we at equilibrium level? +!> (A) If yes, stop the summation. +!> (b) if no, contiunue the summation. +!> (8) Enforce limits on CAPE and CINS (i.e. no negative CAPE) +!> +!> @param[in] ITYPE INTEGER Flag specifying how parcel to lift is identified. See comments above. +!> @param[in] DPBND Depth over which one searches for most unstable parcel. +!> @param[in] P1D Array of pressure of parcels to lift. +!> @param[in] T1D Array of temperature of parcels to lift. +!> @param[in] Q1D Array of specific humidity of parcels to lift. +!> @param[in] L1D Array of model level of parcels to lift. +!> @param[out] CAPE Convective available potential energy (J/kg). +!> @param[out] CINS Convective inhibition (J/kg). +!> @param[out] LFC level of free convection (m). +!> @param[out] ESRHL Lower bound to account for effective helicity calculation. +!> @param[out] ESRHH Upper bound to account for effective helicity calculation. +!> @param[out] DCAPE downdraft CAPE (J/KG). +!> @param[out] DGLD Dendritic growth layer depth (m). +!> @param[out] ESP Enhanced stretching potential. 
+!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1993-02-10 | Russ Treadon | Initial +!> 1993-06-19 | Russ Treadon | Generalized routine to allow for type 2 CAPE/CINS calculations +!> 1994-09-23 | Mike Baldwin | Modified to use look up tables instead of complicated equations +!> 1994-10-13 | Mike Baldwin | Modified to continue CAPE/CINS calc up to at highest buoyant layer +!> 1998-06-12 | T Black | Conversion from 1-D TO 2-D +!> 1998-08-18 | T Black | Compute APE internally +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-01-15 | Mike Baldwin | WRF Version +!> 2003-08-24 | G Manikin | Added level of parcel being lifted as output from the routine and added the depth over which one searches for the most unstable parcel as input +!> 2010-09-09 | G Manikin | Changed computation to use virtual temp added eq lvl hght and thunder parameter +!> 2015-??-?? | S Moorthi | Optimization and threading +!> 2021-09-03 | J Meng | Modified to add 0-3km CAPE/CINS, LFC, effective helicity, downdraft CAPE, dendritic growth layer depth, ESP +!> 2021-09-01 | E Colon | Equivalent level height index for RTMA +!> +!> @author Russ Treadon W/NP2 @date 1993-02-10 SUBROUTINE CALCAPE2(ITYPE,DPBND,P1D,T1D,Q1D,L1D, & CAPE,CINS,LFC,ESRHL,ESRHH, & DCAPE,DGLD,ESP) -! SUBROUTINE CALCAPE(ITYPE,DPBND,P1D,T1D,Q1D,L1D,CAPE, & -! CINS,PPARC,ZEQL,THUND) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALCAPE COMPUTES CAPE AND CINS -! PRGRMMR: TREADON ORG: W/NP2 DATE: 93-02-10 -! -! ABSTRACT: -! -! THIS ROUTINE COMPUTES CAPE AND CINS GIVEN TEMPERATURE, -! PRESSURE, AND SPECIFIC HUMIDTY. IN "STORM AND CLOUD -! DYNAMICS" (1989, ACADEMIC PRESS) COTTON AND ANTHES DEFINE -! CAPE (EQUATION 9.16, P501) AS -! -! EL -! CAPE = SUM G * LN(THETAP/THETAA) DZ -! LCL -! -! WHERE, -! EL = EQUILIBRIUM LEVEL, -! LCL = LIFTING CONDENSTATION LEVEL, -! G = GRAVITATIONAL ACCELERATION, -! THETAP = LIFTED PARCEL POTENTIAL TEMPERATURE, -! THETAA = AMBIENT POTENTIAL TEMPERATURE. -! -! NOTE THAT THE INTEGRAND LN(THETAP/THETAA) APPROXIMATELY -! EQUALS (THETAP-THETAA)/THETAA. THIS RATIO IS OFTEN USED -! IN THE DEFINITION OF CAPE/CINS. -! -! TWO TYPES OF CAPE/CINS CAN BE COMPUTED BY THIS ROUTINE. THE -! SUMMATION PROCESS IS THE SAME FOR BOTH CASES. WHAT DIFFERS -! IS THE DEFINITION OF THE PARCEL TO LIFT. FOR ITYPE=1 THE -! PARCEL WITH THE WARMEST THETA-E IN A DPBND PASCAL LAYER ABOVE -! THE MODEL SURFACE IS LIFTED. THE ARRAYS P1D, T1D, AND Q1D -! ARE NOT USED. FOR ITYPE=2 THE ARRAYS P1D, T1D, AND Q1D -! DEFINE THE PARCEL TO LIFT IN EACH COLUMN. BOTH TYPES OF -! CAPE/CINS MAY BE COMPUTED IN A SINGLE EXECUTION OF THE POST -! PROCESSOR. -! -! THIS ALGORITHM PROCEEDS AS FOLLOWS. -! FOR EACH COLUMN, -! (1) INITIALIZE RUNNING CAPE AND CINS SUM TO 0.0 -! (2) COMPUTE TEMPERATURE AND PRESSURE AT THE LCL USING -! LOOK UP TABLE (PTBL). USE EITHER PARCEL THAT GIVES -! MAX THETAE IN LOWEST DPBND ABOVE GROUND (ITYPE=1) -! OR GIVEN PARCEL FROM T1D,Q1D,...(ITYPE=2). -! (3) COMPUTE THE TEMP OF A PARCEL LIFTED FROM THE LCL. -! WE KNOW THAT THE PARCEL'S -! EQUIVALENT POTENTIAL TEMPERATURE (THESP) REMAINS -! CONSTANT THROUGH THIS PROCESS. WE CAN -! COMPUTE TPAR USING THIS KNOWLEDGE USING LOOK -! UP TABLE (SUBROUTINE TTBLEX). -! (4) FIND THE EQUILIBRIUM LEVEL. THIS IS DEFINED AS THE -! HIGHEST POSITIVELY BUOYANT LAYER. -! (IF THERE IS NO POSITIVELY BUOYANT LAYER, CAPE/CINS -! WILL BE ZERO) -! (5) COMPUTE CAPE/CINS. -! (A) COMPUTE THETAP. WE KNOW TPAR AND P. -! (B) COMPUTE THETAA. WE KNOW T AND P. -! 
(6) ADD G*(THETAP-THETAA)*DZ TO THE RUNNING CAPE OR CINS SUM. -! (A) IF THETAP > THETAA, ADD TO THE CAPE SUM. -! (B) IF THETAP < THETAA, ADD TO THE CINS SUM. -! (7) ARE WE AT EQUILIBRIUM LEVEL? -! (A) IF YES, STOP THE SUMMATION. -! (B) IF NO, CONTIUNUE THE SUMMATION. -! (8) ENFORCE LIMITS ON CAPE AND CINS (I.E. NO NEGATIVE CAPE) -! -! PROGRAM HISTORY LOG: -! 93-02-10 RUSS TREADON -! 93-06-19 RUSS TREADON - GENERALIZED ROUTINE TO ALLOW FOR -! TYPE 2 CAPE/CINS CALCULATIONS. -! 94-09-23 MIKE BALDWIN - MODIFIED TO USE LOOK UP TABLES -! INSTEAD OF COMPLICATED EQUATIONS. -! 94-10-13 MIKE BALDWIN - MODIFIED TO CONTINUE CAPE/CINS CALC -! UP TO AT HIGHEST BUOYANT LAYER. -! 98-06-12 T BLACK - CONVERSION FROM 1-D TO 2-D -! 98-08-18 T BLACK - COMPUTE APE INTERNALLY -! 00-01-04 JIM TUCCILLO - MPI VERSION -! 02-01-15 MIKE BALDWIN - WRF VERSION -! 03-08-24 G MANIKIN - ADDED LEVEL OF PARCEL BEING LIFTED -! AS OUTPUT FROM THE ROUTINE AND ADDED -! THE DEPTH OVER WHICH ONE SEARCHES FOR -! THE MOST UNSTABLE PARCEL AS INPUT -! 10-09-09 G MANIKIN - CHANGED COMPUTATION TO USE VIRTUAL TEMP -! - ADDED EQ LVL HGHT AND THUNDER PARAMETER -! 15-xx-xx S MOORTHI - optimization and threading -! 19-09-03 J MENG - MODIFIED TO ADD 0-3KM CAPE/CINS, LFC, -! EFFECTIVE HELICITY, DOWNDRAFT CAPE, -! DENDRITIC GROWTH LAYER DEPTH, ESP -! 21-09-01 E COLON - equivalent level height index for RTMA -! -! USAGE: CALL CALCAPE2(ITYPE,DPBND,P1D,T1D,Q1D,L1D, & -! CAPE,CINS,LFC,ESRHL,ESRHH, & -! DCAPE,DGLD,ESP) -! -! INPUT ARGUMENT LIST: -! ITYPE - INTEGER FLAG SPECIFYING HOW PARCEL TO LIFT IS -! IDENTIFIED. SEE COMMENTS ABOVE. -! DPBND - DEPTH OVER WHICH ONE SEARCHES FOR MOST UNSTABLE PARCEL -! P1D - ARRAY OF PRESSURE OF PARCELS TO LIFT. -! T1D - ARRAY OF TEMPERATURE OF PARCELS TO LIFT. -! Q1D - ARRAY OF SPECIFIC HUMIDITY OF PARCELS TO LIFT. -! L1D - ARRAY OF MODEL LEVEL OF PARCELS TO LIFT. -! -! OUTPUT ARGUMENT LIST: -! CAPE - CONVECTIVE AVAILABLE POTENTIAL ENERGY (J/KG) -! CINS - CONVECTIVE INHIBITION (J/KG) -! LFC - LEVEL OF FREE CONVECTION (M) -! ESRHL - LOWER BOUND TO ACCOUNT FOR EFFECTIVE HELICITY CALCULATION -! ESRHH - UPPER BOUND TO ACCOUNT FOR EFFECTIVE HELICITY CALCULATION -! DCAPE - DOWNDRAFT CAPE (J/KG) -! DGLD - DENDRITIC GROWTH LAYER DEPTH (M) -! ESP - ENHANCED STRETCHING POTENTIAL -! -! OUTPUT FILES: -! STDOUT - RUN TIME STANDARD OUT. -! -! SUBPROGRAMS CALLED: -! UTILITIES: -! BOUND - BOUND (CLIP) DATA BETWEEN UPPER AND LOWER LIMTS. -! TTBLEX - LOOKUP TABLE ROUTINE TO GET T FROM THETAE AND P -! -! LIBRARY: -! COMMON - -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN 90 -! MACHINE : CRAY C-90 -!$$$ -! use vrbls3d, only: pmid, t, q, zint use vrbls2d, only: fis,ieql use gridspec_mod, only: gridtype @@ -1793,54 +1671,31 @@ end function TVIRTUAL ! !------------------------------------------------------------------------------------- ! -! -!------------------------------------------------------------------------------------- -! - !> @file -! -!> SUBPROGRAM: CALVOR COMPUTES ABSOLUTE VORTICITY -!! PRGRMMR: TREADON ORG: W/NP2 DATE: 92-12-22 -!! -!! ABSTRACT: -!! THIS ROUTINE COMPUTES THE ABSOLUTE VORTICITY. -!! -!! PROGRAM HISTORY LOG: -!! 92-12-22 RUSS TREADON -!! 98-06-08 T BLACK - CONVERSION FROM 1-D TO 2-D -!! 00-01-04 JIM TUCCILLO - MPI VERSION -!! 02-01-15 MIKE BALDWIN - WRF VERSION C-GRID -!! 05-03-01 H CHUANG - ADD NMM E GRID -!! 05-05-17 H CHUANG - ADD POTENTIAL VORTICITY CALCULATION -!! 05-07-07 B ZHOU - ADD RSM IN COMPUTING DVDX, DUDY AND UAVG -!! 13-08-09 S MOORTHI - Optimize the vorticity loop including threading -!! 
16-08-05 S Moorthi - add zonal filetering -!! 2019-10-17 Y Mao - Skip calculation when U/V is SPVAL -!! 2020-11-06 J Meng - USE UPP_MATH MODULE -!! 21-09-02 Bo Cui - Decompose UPP in X direction, REPLACE EXCH_F to EXCH -!! 21-10-31 J MENG - 2D DECOMPOSITION -!! -!! USAGE: CALL CALVOR(UWND,VWND,ABSV) -!! INPUT ARGUMENT LIST: -!! UWND - U WIND (M/S) MASS-POINTS -!! VWND - V WIND (M/S) MASS-POINTS -!! -!! OUTPUT ARGUMENT LIST: -!! ABSV - ABSOLUTE VORTICITY (1/S) MASS-POINTS -!! -!! OUTPUT FILES: -!! NONE -!! -!! SUBPROGRAMS CALLED: -!! UTILITIES: -!! NONE -!! LIBRARY: -!! COMMON - CTLBLK -!! -!! ATTRIBUTES: -!! LANGUAGE: FORTRAN -!! MACHINE : WCOSS -!! +!> @brief Subroutine that computes absolute vorticity. +!> +!> This routine computes the absolute vorticity. +!> +!> @param[in] UWND U wind (m/s) mass-points. +!> @param[in] VWND V wind (m/s) mass-points. +!> @param[out] ABSV absolute vorticity (1/s) mass-points. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 1992-12-22 | Russ Treadon | Initial +!> 1998-06-08 | T Black | Convesion from 1-D to 2-D +!> 2000-01-04 | Jim Tuccillo | MPI Version +!> 2002-01-15 | Mike Baldwin | WRF Version C-grid +!> 2005-03-01 | H Chuang | Add NMM E grid +!> 2005-05-17 | H Chuang | Add Potential vorticity calculation +!> 2005-07-07 | B Zhou | Add RSM in computing DVDX, DUDY and UAVG +!> 2013-08-09 | S Moorthi | Optimize the vorticity loop including threading +!> 2016-08-05 | S Moorthi | add zonal filetering +!> 2019-10-17 | Y Mao | Skip calculation when U/V is SPVAL +!> 2020-11-06 | J Meng | Use UPP_MATH Module +!> +!> @author Russ Treadon W/NP2 @date 1992-12-22 SUBROUTINE CALVOR(UWND,VWND,ABSV) ! @@ -2252,44 +2107,23 @@ SUBROUTINE CALVOR(UWND,VWND,ABSV) RETURN END +!> CALDIV computes divergence. +!> +!> For GFS, this routine copmutes the horizontal divergence +!> using 2nd-order centered scheme on a lat-lon grid +!> +!> @param[in] UWND U wind (m/s) mass-points. +!> @param[in] VWND V wind (m/s) mass-points. +!> @param[out] DIV divergence (1/s) mass-points. +!> +!> ### Program history log: +!> Date | Programmer | Comments +!> -----|------------|--------- +!> 2016-05-05 | Sajal Kar | Modified CALVORT to compute divergence from wind components +!> 2016-07-22 | S Moorthi | Modified polar divergence calculation +!> +!> @author Sajal Kar W/NP2 @date 2016-05-05 SUBROUTINE CALDIV(UWND,VWND,DIV) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! SUBPROGRAM: CALDIV COMPUTES DIVERGENCE -! PRGRMMR: SAJAL KAR ORG: W/NP2 DATE: 16-05-05 -! -! ABSTRACT: -! FOR GFS, THIS ROUTINE COMPUTES THE HORIZONTAL DIVERGENCE -! USING 2ND-ORDER CENTERED SCHEME ON A LAT-LON GRID -! -! PROGRAM HISTORY LOG: -! 16-05-05 SAJAL KAR MODIFIED CALVORT TO COMPUTE DIVERGENCE FROM -! WIND COMPONENTS -! 16-07-22 S Moorthi modifying polar divergence calculation -! -! USAGE: CALL CALDIV(UWND,VWND,DIV) -! INPUT ARGUMENT LIST: -! UWND - U WIND (M/S) MASS-POINTS -! VWND - V WIND (M/S) MASS-POINTS -! -! OUTPUT ARGUMENT LIST: -! DIV - DIVERGENCE (1/S) MASS-POINTS -! -! OUTPUT FILES: -! NONE -! -! SUBPROGRAMS CALLED: -! UTILITIES: -! NONE -! LIBRARY: -! COMMON - CTLBLK -! -! ATTRIBUTES: -! LANGUAGE: FORTRAN -! MACHINE : WCOSS -!$$$ -! -! use masks, only: gdlat, gdlon use params_mod, only: d00, dtr, small, erad use ctlblk_mod, only: jsta_2l, jend_2u, spval, modelname, global, & @@ -2560,41 +2394,21 @@ SUBROUTINE CALDIV(UWND,VWND,DIV) END SUBROUTINE CALDIV SUBROUTINE CALGRADPS(PS,PSX,PSY) -!$$$ SUBPROGRAM DOCUMENTATION BLOCK -! . . . -! 
-! SUBPROGRAM:    CALGRADPS   COMPUTES GRADIENTS OF A SCALAR FIELD PS OR LNPS
-!   PRGRMMR: SAJAL KAR       ORG: W/NP2      DATE: 16-05-05
-!
-! ABSTRACT:
-!   FOR GFS, THIS ROUTINE COMPUTES HRIZONTAL GRADIENTS OF PS OR LNPS
-!   USING 2ND-ORDER CENTERED SCHEME ON A LAT-LON GRID
-!
-! PROGRAM HISTORY LOG:
-!   16-05-05  SAJAL KAR  REDUCED FROM CALVORT TO ZONAL AND MERIDIONAL
-!             GRADIENTS OF GIVEN SURFACE PRESSURE PS, OR LNPS
-!
-! USAGE:    CALL CALGRADPS(PS,PSX,PSY)
-!   INPUT ARGUMENT LIST:
-!     PS   - SURFACE PRESSURE (PA) MASS-POINTS
-!
-!   OUTPUT ARGUMENT LIST:
-!     PSX  - ZONAL GRADIENT OF PS AT MASS-POINTS
-!     PSY  - MERIDIONAL GRADIENT OF PS AT MASS-POINTS
-!
-!   OUTPUT FILES:
-!     NONE
-!
-!   SUBPROGRAMS CALLED:
-!     UTILITIES:
-!       NONE
-!     LIBRARY:
-!       COMMON - CTLBLK
-!
-!   ATTRIBUTES:
-!     LANGUAGE: FORTRAN
-!     MACHINE : WCOSS
-!$$$
-!
+!> CALGRADPS computes gradients of a scalar field PS or LNPS.
+!>
+!> For GFS, this routine computes horizontal gradients of PS or LNPS
+!> using a 2nd-order centered scheme on a lat-lon grid.
+!>
+!> @param[in] PS Surface pressure (Pa) mass-points.
+!> @param[out] PSX Zonal gradient of PS at mass-points.
+!> @param[out] PSY Meridional gradient of PS at mass-points.
+!>
+!> ### Program history log:
+!> Date | Programmer | Comments
+!> -----|------------|---------
+!> 2016-05-05 | Sajal Kar | Reduced from CALVORT to zonal and meridional gradients of given surface pressure PS, or LNPS
+!>
+!> @author Sajal Kar W/NP2 @date 2016-05-05
      use masks, only: gdlat, gdlon
      use params_mod, only: dtr, d00, small, erad
      use ctlblk_mod, only: jsta_2l, jend_2u, spval, modelname, global, &
diff --git a/sorc/ncep_post.fd/VRBLS2D_mod.f b/sorc/ncep_post.fd/VRBLS2D_mod.f
index aa3231177..134d014f1 100644
--- a/sorc/ncep_post.fd/VRBLS2D_mod.f
+++ b/sorc/ncep_post.fd/VRBLS2D_mod.f
@@ -82,7 +82,7 @@ module vrbls2d
       ,avgesnow(:,:),avgpotevp(:,:),avgprec_cont(:,:),avgcprate_cont(:,:)&
       ,ti(:,:),aod550(:,:),du_aod550(:,:),ss_aod550(:,:),su_aod550(:,:) &
       ,bc_aod550(:,:),oc_aod550(:,:),landfrac(:,:),paha(:,:),pahi(:,:) &
-      ,tecan(:,:),tetran(:,:),tedir(:,:),twa(:,:),fdnsst(:,:)
+      ,tecan(:,:),tetran(:,:),tedir(:,:),twa(:,:),fdnsst(:,:),pwat(:,:)
      integer, allocatable :: IVGTYP(:,:),ISLTYP(:,:),ISLOPE(:,:) &
      ,IEQL(:,:)
diff --git a/sorc/ncep_post.fd/WRFPOST.f b/sorc/ncep_post.fd/WRFPOST.f
index bcce7e8f1..cedb5eba0 100644
--- a/sorc/ncep_post.fd/WRFPOST.f
+++ b/sorc/ncep_post.fd/WRFPOST.f
@@ -48,6 +48,7 @@
!!   21-11-03  Tracy Hertneky - Removed SIGIO option
!!   22-01-14  W Meng - Remove interfaces INITPOST_GS_NEMS, INITPOST_NEMS_MPIIO
!!                      INITPOST_NMM and INITPOST_GFS_NETCDF.
+!!   22-03-15  W Meng - Unify FV3-based interfaces.
!!
!! USAGE:    WRFPOST
!!
INPUT ARGUMENT LIST: @@ -146,11 +147,11 @@ PROGRAM WRFPOST use CTLBLK_mod, only: filenameaer, me, num_procs, num_servers, mpi_comm_comp, datestr, & mpi_comm_inter, filename, ioform, grib, idat, filenameflux, filenamed3d, gdsdegr, & spldef, modelname, ihrst, lsmdef,vtimeunits, tprec, pthresh, datahandle, im, jm, lm, & - lp1, lm1, im_jm, isf_surface_physics, nsoil, spl, lsmp1, global, & + lp1, lm1, im_jm, isf_surface_physics, nsoil, spl, lsmp1, global, imp_physics, & ista, iend, ista_m, iend_m, ista_2l, iend_2u, & jsta, jend, jsta_m, jend_m, jsta_2l, jend_2u, novegtype, icount_calmict, npset, datapd,& lsm, fld_info, etafld2_tim, eta2p_tim, mdl2sigma_tim, cldrad_tim, miscln_tim, & - mdl2agl_tim, mdl2std_tim, mdl2thandpv_tim, calrad_wcloud_tim, & + mdl2agl_tim, mdl2std_tim, mdl2thandpv_tim, calrad_wcloud_tim, & fixed_tim, time_output, imin, surfce2_tim, komax, ivegsrc, d3d_on, gocart_on,rdaod, & readxml_tim, spval, fullmodelname, submodelname, hyb_sigp, filenameflat, aqfcmaq_on,numx use grib2_module, only: gribit2,num_pset,nrecout,first_grbtbl,grib_info_finalize @@ -242,7 +243,7 @@ PROGRAM WRFPOST if (me==0) print*,'DateStr= ',DateStr if (me==0) print*,'MODELNAME= ',MODELNAME if (me==0) print*,'SUBMODELNAME= ',SUBMODELNAME -! if (me==0) print*,'numx= ',numx + if (me==0) print*,'numx= ',numx ! if(MODELNAME == 'NMM')then ! read(5,1114) VTIMEUNITS ! 1114 format(a4) @@ -338,12 +339,18 @@ PROGRAM WRFPOST print*,'numx= ',numx endif - IF(TRIM(IOFORM) /= 'netcdfpara') THEN + IF(TRIM(IOFORM) /= 'netcdfpara' .AND. TRIM(IOFORM) /= 'netcdf' ) THEN numx=1 if(me == 0) print*,'2D decomposition only supports netcdfpara IO.' if(me == 0) print*,'Reset numx= ',numx ENDIF + IF(MODELNAME /= 'FV3R' .AND. MODELNAME /= 'GFS') THEN + numx=1 + if(me == 0) print*,'2D decomposition only supports GFS and FV3R.' + if(me == 0) print*,'Reset numx= ',numx + ENDIF + ! set up pressure level from POSTGPVARS or DEFAULT if(kpo == 0) then ! use default pressure levels @@ -387,7 +394,7 @@ PROGRAM WRFPOST PTHRESH = 0.000001 end if !Chuang: add dynamical allocation - IF(TRIM(IOFORM) == 'netcdf') THEN + if(TRIM(IOFORM) == 'netcdf' .OR. TRIM(IOFORM) == 'netcdfpara') THEN IF(MODELNAME == 'NCAR' .OR. MODELNAME == 'RAPR' .OR. MODELNAME == 'NMM') THEN call ext_ncd_ioinit(SysDepInfo,Status) print*,'called ioinit', Status @@ -431,14 +438,16 @@ PROGRAM WRFPOST call ext_ncd_ioclose ( DataHandle, Status ) ELSE -! use netcdf lib directly to read FV3 output in netCDF +! use parallel netcdf lib directly to read FV3 output in netCDF spval = 9.99e20 - Status = nf90_open(trim(fileName),NF90_NOWRITE, ncid3d) + Status = nf90_open(trim(fileName),IOR(NF90_NOWRITE,NF90_MPIIO), & + ncid3d,comm=mpi_comm_world,info=mpi_info_null) if ( Status /= 0 ) then print*,'error opening ',fileName, ' Status = ', Status stop endif - Status = nf90_open(trim(fileNameFlux),NF90_NOWRITE, ncid2d) + Status = nf90_open(trim(fileNameFlux),IOR(NF90_NOWRITE,NF90_MPIIO), & + ncid2d,comm=mpi_comm_world,info=mpi_info_null) if ( Status /= 0 ) then print*,'error opening ',fileNameFlux, ' Status = ', Status stop @@ -459,6 +468,13 @@ PROGRAM WRFPOST endif if(me==0)print*,'SF_SURFACE_PHYSICS= ',iSF_SURFACE_PHYSICS if(me==0)print*,'NSOIL= ',NSOIL +! read imp_physics + Status=nf90_get_att(ncid2d,nf90_global,'imp_physics',imp_physics) + if(Status/=0)then + print*,'imp_physics not found; assigning to GFDL 11' + imp_physics=11 + endif + if (me == 0) print*,'MP_PHYSICS= ',imp_physics ! 
get dimesions Status = nf90_inq_dimid(ncid3d,'grid_xt',varid) if ( Status /= 0 ) then @@ -499,53 +515,6 @@ PROGRAM WRFPOST print*,'im jm lm nsoil from fv3 output = ',im,jm,lm,nsoil END IF -! use netcdf_parallel lib directly to read FV3 output in netCDF - ELSE IF(TRIM(IOFORM) == 'netcdfpara') THEN - spval = 9.99e20 - Status = nf90_open(trim(fileName),ior(nf90_nowrite, nf90_mpiio), & - ncid3d, comm=mpi_comm_world, info=mpi_info_null) - if ( Status /= 0 ) then - print*,'error opening ',fileName, ' Status = ', Status - stop - endif -! get dimesions - Status = nf90_inq_dimid(ncid3d,'grid_xt',varid) - if ( Status /= 0 ) then - print*,Status,varid - STOP 1 - end if - Status = nf90_inquire_dimension(ncid3d,varid,len=im) - if ( Status /= 0 ) then - print*,Status - STOP 1 - end if - Status = nf90_inq_dimid(ncid3d,'grid_yt',varid) - if ( Status /= 0 ) then - print*,Status,varid - STOP 1 - end if - Status = nf90_inquire_dimension(ncid3d,varid,len=jm) - if ( Status /= 0 ) then - print*,Status - STOP 1 - end if - Status = nf90_inq_dimid(ncid3d,'pfull',varid) - if ( Status /= 0 ) then - print*,Status,varid - STOP 1 - end if - Status = nf90_inquire_dimension(ncid3d,varid,len=lm) - if ( Status /= 0 ) then - print*,Status - STOP 1 - end if - LP1 = LM+1 - LM1 = LM-1 - IM_JM = IM*JM -! set NSOIL to 4 as default for NOAH but change if using other -! SFC scheme - NSOIL = 4 - print*,'im jm lm nsoil from fv3 output = ',im,jm,lm,nsoil ELSE IF(TRIM(IOFORM) == 'binary' .OR. & TRIM(IOFORM) == 'binarympiio' ) THEN @@ -649,22 +618,18 @@ PROGRAM WRFPOST ! Reading model output for different models and IO format - IF(TRIM(IOFORM) == 'netcdf') THEN + IF(TRIM(IOFORM) == 'netcdf' .OR. TRIM(IOFORM) == 'netcdfpara') THEN IF(MODELNAME == 'NCAR' .OR. MODELNAME == 'RAPR') THEN print*,'CALLING INITPOST TO PROCESS NCAR NETCDF OUTPUT' CALL INITPOST - ELSE IF (MODELNAME == 'FV3R') THEN -! use netcdf library to read output directly + ELSE IF (MODELNAME == 'FV3R' .OR. MODELNAME == 'GFS') THEN +! use parallel netcdf library to read output directly print*,'CALLING INITPOST_NETCDF' CALL INITPOST_NETCDF(ncid2d,ncid3d) ELSE PRINT*,'POST does not have netcdf option for model,',MODELNAME,' STOPPING,' STOP 9998 END IF -! use netcdf_parallel library to read fv3 output - ELSE IF(TRIM(IOFORM) == 'netcdfpara') THEN - print*,'CALLING INITPOST_GFS_NETCDF_PARA' - CALL INITPOST_GFS_NETCDF_PARA(ncid3d) ELSE IF(TRIM(IOFORM) == 'binarympiio') THEN IF(MODELNAME == 'NCAR' .OR. MODELNAME == 'RAPR' .OR. MODELNAME == 'NMM') THEN print*,'WRF BINARY IO FORMAT IS NO LONGER SUPPORTED, STOPPING' diff --git a/tests/compile_upp.sh b/tests/compile_upp.sh index 7e9a9d310..2c20f660c 100755 --- a/tests/compile_upp.sh +++ b/tests/compile_upp.sh @@ -7,12 +7,13 @@ set -eu usage() { echo - echo "Usage: $0 [-g] [-w] -h" + echo "Usage: $0 [-p] [-g] [-w] [-v] [-c] -h" echo echo " -p installation prefix DEFAULT: ../install" - echo " -g Build with GTG(users with gtg repos. access only) DEFAULT: OFF" - echo " -w Build without WRF-IO DEFAULT: ON" - echo " -v Build with cmake verbose DEFAULT: NO" + echo " -g build with GTG(users with gtg repos. 
access only) DEFAULT: OFF"
+  echo "  -w    build without WRF-IO                              DEFAULT: ON"
+  echo "  -v    build with cmake verbose                          DEFAULT: NO"
+  echo "  -c    compiler to use for build                         DEFAULT: intel"
   echo "  -h    display this message and quit"
   echo
   exit 1
@@ -21,8 +22,9 @@ usage() {
 prefix="../install"
 gtg_opt=" -DBUILD_WITH_GTG=OFF"
 wrfio_opt=" -DBUILD_WITH_WRFIO=ON"
+compiler="intel"
 verbose_opt=""
-while getopts ":p:gwvh" opt; do
+while getopts ":p:gwc:vh" opt; do
   case $opt in
     p)
       prefix=$OPTARG
@@ -33,6 +35,9 @@ while getopts ":p:gwvh" opt; do
     w)
       wrfio_opt=" -DBUILD_WITH_WRFIO=OFF"
       ;;
+    c)
+      compiler=$OPTARG
+      ;;
     v)
       verbose_opt="VERBOSE=1"
      ;;
@@ -43,7 +48,6 @@ done
 cmake_opts=" -DCMAKE_INSTALL_PREFIX=$prefix"${wrfio_opt}${gtg_opt}
-hostname
 source ./detect_machine.sh
 if [[ $(uname -s) == Darwin ]]; then
   readonly MYDIR=$(cd "$(dirname "$(greadlink -f -n "${BASH_SOURCE[0]}" )" )" && pwd -P)
@@ -60,7 +64,17 @@ if [[ $MACHINE_ID != "unknown" ]]; then
     module purge
   fi
   module use $PATHTR/modulefiles
-  modulefile=${MACHINE_ID}
+  if [[ $compiler == "intel" ]]; then
+    modulefile=${MACHINE_ID}
+  else
+    modulefile=${MACHINE_ID}_${compiler}
+  fi
+  if [[ -f "${PATHTR}/modulefiles/${modulefile}" || -f "${PATHTR}/modulefiles/${modulefile}.lua" ]]; then
+    echo "Building for machine ${MACHINE_ID}, compiler ${compiler}"
+  else
+    echo "Modulefile does not exist for machine ${MACHINE_ID}, compiler ${compiler}"
+    exit 1
+  fi
   module load $modulefile
   module list
fi
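The updated `tests/compile_upp.sh` now keys the modulefile on both the detected machine and the requested compiler: the default Intel build loads the unsuffixed `${MACHINE_ID}` modulefile, any other `-c` value loads `${MACHINE_ID}_${compiler}`, and the script exits early if no matching modulefile is found. A minimal usage sketch follows; the `gnu` compiler value and the `../install-gnu` prefix are illustrative only, and it assumes the script is run from the `tests/` directory on a host that `detect_machine.sh` recognizes and that provides a matching `<machine>_gnu` modulefile.

```
cd tests

# Default build: Intel toolchain, default prefix ../install,
# loads the unsuffixed ${MACHINE_ID} modulefile.
./compile_upp.sh

# Non-default compiler: loads ${MACHINE_ID}_gnu instead and stops with
# "Modulefile does not exist ..." if that modulefile is missing.
./compile_upp.sh -c gnu -p ../install-gnu
```

Because `-c intel` (or omitting `-c`) maps back to the unsuffixed modulefile name, existing Intel invocations of the script keep working unchanged.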