diff --git a/modulefiles/gsi_jet.lua b/modulefiles/gsi_jet.lua index ddb255bc1f..855597a08e 100644 --- a/modulefiles/gsi_jet.lua +++ b/modulefiles/gsi_jet.lua @@ -1,26 +1,32 @@ help([[ ]]) -load("cmake/3.20.1") +prepend_path("MODULEPATH", "/mnt/lfs4/HFIP/hfv3gfs/role.epic/hpc-stack/libs/intel-18.0.5.274/modulefiles/stack") -prepend_path("MODULEPATH", "/contrib/anaconda/modulefiles") +local hpc_ver=os.getenv("hpc_ver") or "1.2.0" +local hpc_intel_ver=os.getenv("hpc_intel_ver") or "18.0.5.274" +local hpc_impi_ver=os.getenv("hpc_impi_ver") or "2018.4.274" +local cmake_ver=os.getenv("cmake_ver") or "3.20.1" +local anaconda_ver=os.getenv("anaconda_ver") or "5.3.1" +local prod_util_ver=os.getenv("prod_util_ver") or "1.2.2" -load("anaconda/5.3.1") +load(pathJoin("hpc", hpc_ver)) +load(pathJoin("hpc-intel", hpc_intel_ver)) +load(pathJoin("hpc-impi", hpc_impi_ver)) +load(pathJoin("cmake", cmake_ver)) -prepend_path("MODULEPATH", "/lfs4/HFIP/hfv3gfs/nwprod/hpc-stack/libs/modulefiles/stack") +prepend_path("MODULEPATH", "/contrib/anaconda/modulefiles") -load("hpc/1.1.0") -load("hpc-intel/18.0.5.274") -load("hpc-impi/2018.4.274") +load(pathJoin("anaconda", anaconda_ver)) load("gsi_common") -local prod_util_ver=os.getenv("prod_util_ver") or "1.2.2" load(pathJoin("prod_util", prod_util_ver)) pushenv("CFLAGS", "-axSSE4.2,AVX,CORE-AVX2") pushenv("FFLAGS", "-axSSE4.2,AVX,CORE-AVX2") -pushenv("GSI_BINARY_SOURCE_DIR", "/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/gsi/20221128") + +pushenv("GSI_BINARY_SOURCE_DIR", "/mnt/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/gsi/20221128") whatis("Description: GSI environment on Jet with Intel Compilers") diff --git a/regression/global_3dvar.sh b/regression/global_3dvar.sh index 145cb6212c..56f78ad384 100755 --- a/regression/global_3dvar.sh +++ b/regression/global_3dvar.sh @@ -294,7 +294,7 @@ for type in $listdiag; do date=`echo $diag_file | cut -d'.' -f2` $UNCOMPRESS $diag_file fnameanl=$(echo $fname|sed 's/_ges//g') - mv $fname.$date $fnameanl + mv ${fname}.${date} $fnameanl done # Run GSI diff --git a/regression/global_4dvar.sh b/regression/global_4dvar.sh index ec2a8396e0..cac0d28e6b 100755 --- a/regression/global_4dvar.sh +++ b/regression/global_4dvar.sh @@ -98,7 +98,7 @@ SINGLEOB="" # bftab_sst= bufr table for sst ONLY needed for sst retrieval (retrieval=.true.) # aeroinfo = text file with information about assimilation of aerosol data -anavinfo=$fixgsi/global_anavinfo.l${LEVS}.txt +anavinfo=$fixgsi/global_anavinfo_qlqi.l${LEVS}.txt berror=$fixgsi/Big_Endian/global_berror.l${LEVS}y${NLAT}.f77 locinfo=$fixgsi/global_hybens_info.l${LEVS}.txt satinfo=$fixgsi/global_satinfo.txt @@ -311,9 +311,9 @@ $gsi_namelist EOF cp gsiparm.anl gsiparm.anl.obsvr -echo "run gsi now" +echo "run gsi observer" eval "$APRUN $tmpdir/gsi.x < gsiparm.anl > stdout.obsvr 2>&1" -rc=$? +ra=$? # Run gsi identity model 4dvar under Parallel Operating Environment (poe) on NCEP IBM rm -f siganl sfcanl.gsi satbias_out fort.2* @@ -331,8 +331,8 @@ cat < gsiparm.anl $gsi_namelist EOF -echo "run gsi now" +echo "run gsi 4dvar" eval "$APRUN $tmpdir/gsi.x < gsiparm.anl > stdout 2>&1" -rc=$? - +rb=$? +rc=$((ra+rb)) exit $rc diff --git a/regression/regression_driver.sh b/regression/regression_driver.sh index 621ccbf485..e1d3b18dc7 100755 --- a/regression/regression_driver.sh +++ b/regression/regression_driver.sh @@ -47,7 +47,6 @@ for jn in `seq ${RSTART} ${REND}`; do $scripts/regression_wait.sh ${job[$jn]} ${rcname} $check_resource rc=$? 
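The gsi_jet.lua hunk above replaces hard-coded load("name/version") calls with versions that can be overridden from the environment before falling back to a default. A minimal Lmod sketch of that pattern; the module path, names and default versions here are illustrative, not the repository's authoritative values:

-- Minimal Lmod sketch of the env-overridable version pattern (illustrative values only).
help([[Example: load a stack component whose version can be overridden from the environment]])

-- Pick up an override such as `export cmake_ver=3.23.1` if one is set, else use the default.
local cmake_ver = os.getenv("cmake_ver") or "3.20.1"

-- Make the hpc-stack hierarchy visible before loading from it (path is illustrative).
prepend_path("MODULEPATH", "/path/to/hpc-stack/modulefiles/stack")

-- pathJoin("cmake", "3.20.1") evaluates to "cmake/3.20.1".
load(pathJoin("cmake", cmake_ver))

With this arrangement, testing a newer stack component only requires exporting the corresponding *_ver variable before loading the module; the modulefile itself stays unchanged.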
     if [ $rc -ne 0 ]; then
-       rm -f ${rcname}
        exit 1
     fi
  done
diff --git a/regression/regression_param.sh b/regression/regression_param.sh
index 85f2949bfe..6024dbdb54 100755
--- a/regression/regression_param.sh
+++ b/regression/regression_param.sh
@@ -53,8 +53,8 @@ case $regtest in
        topts[1]="0:15:00" ; popts[1]="12/5/" ; ropts[1]="/1"
        topts[2]="0:15:00" ; popts[2]="12/9/" ; ropts[2]="/2"
     elif [[ "$machine" = "Jet" ]]; then
-       topts[1]="0:50:00" ; popts[1]="12/3/" ; ropts[1]="/1"
-       topts[2]="0:50:00" ; popts[2]="12/9/" ; ropts[2]="/2"
+       topts[1]="0:15:00" ; popts[1]="12/5/" ; ropts[1]="/1"
+       topts[2]="0:15:00" ; popts[2]="12/9/" ; ropts[2]="/2"
     elif [[ "$machine" = "Cheyenne" ]]; then
        topts[1]="0:30:00" ; popts[1]="16/2/" ; ropts[1]="/1"
        topts[2]="0:30:00" ; popts[2]="16/4/" ; ropts[2]="/2"
@@ -123,8 +123,8 @@ case $regtest in
        topts[1]="0:10:00" ; popts[1]="12/8/" ; ropts[1]="/1"
        topts[2]="0:10:00" ; popts[2]="12/12/" ; ropts[2]="/2"
     elif [[ "$machine" = "Jet" ]]; then
-       topts[1]="0:35:00" ; popts[1]="6/8/" ; ropts[1]="/1"
-       topts[2]="0:35:00" ; popts[2]="6/10/" ; ropts[2]="/2"
+       topts[1]="0:10:00" ; popts[1]="12/8/" ; ropts[1]="/1"
+       topts[2]="0:10:00" ; popts[2]="12/10/" ; ropts[2]="/2"
     elif [[ "$machine" = "Discover" ]]; then
        topts[1]="0:30:00" ; popts[1]="48/2" ; ropts[1]="/1"
        topts[2]="0:30:00" ; popts[2]="60/3" ; ropts[2]="/2"
@@ -150,11 +150,11 @@ case $regtest in
        topts[1]="0:15:00" ; popts[1]="20/1/" ; ropts[1]="/1"
        topts[2]="0:15:00" ; popts[2]="20/2/" ; ropts[2]="/1"
     elif [[ "$machine" = "Orion" ]]; then
-       topts[1]="0:15:00" ; popts[1]="4/4/" ; ropts[1]="/1"
-       topts[2]="0:15:00" ; popts[2]="6/6/" ; ropts[2]="/1"
+       topts[1]="0:15:00" ; popts[1]="20/1/" ; ropts[1]="/1"
+       topts[2]="0:15:00" ; popts[2]="20/2/" ; ropts[2]="/1"
     elif [[ "$machine" = "Jet" ]]; then
-       topts[1]="0:15:00" ; popts[1]="4/4/" ; ropts[1]="/1"
-       topts[2]="0:15:00" ; popts[2]="6/6/" ; ropts[2]="/1"
+       topts[1]="0:15:00" ; popts[1]="20/1/" ; ropts[1]="/1"
+       topts[2]="0:15:00" ; popts[2]="20/2/" ; ropts[2]="/1"
     elif [[ "$machine" = "wcoss2" ]]; then
        topts[1]="0:15:00" ; popts[1]="64/1/" ; ropts[1]="/1"
        topts[2]="0:15:00" ; popts[2]="128/2/" ; ropts[2]="/1"
@@ -255,8 +255,8 @@ case $regtest in
        topts[1]="0:10:00" ; popts[1]="12/3/" ; ropts[1]="/1"
        topts[2]="0:10:00" ; popts[2]="12/5/" ; ropts[2]="/2"
     elif [[ "$machine" = "Jet" ]]; then
-       topts[1]="0:15:00" ; popts[1]="12/3/" ; ropts[1]="/1"
-       topts[2]="0:15:00" ; popts[2]="12/5/" ; ropts[2]="/2"
+       topts[1]="0:10:00" ; popts[1]="12/3/" ; ropts[1]="/1"
+       topts[2]="0:10:00" ; popts[2]="12/5/" ; ropts[2]="/2"
     elif [[ "$machine" = "Cheyenne" ]]; then
        topts[1]="0:15:00" ; popts[1]="16/2/" ; ropts[1]="/1"
        topts[2]="0:15:00" ; popts[2]="16/4/" ; ropts[2]="/2"
diff --git a/regression/regression_test.sh b/regression/regression_test.sh
index deb34ff244..0bcb9f4d90 100755
--- a/regression/regression_test.sh
+++ b/regression/regression_test.sh
@@ -576,11 +576,20 @@ mkdir -p $vfydir
 $ncp $output $vfydir/
 
+# Final check for any failed tests
+count=$(grep -i "fail" $output |wc -l)
+if [ $count -gt 0 ]; then
+   (( failed_test = $failed_test + $count ))
+fi
+
+# Remove job log files if no failures were detected
 cd $scripts
-rm -f ${exp1}.out
-rm -f ${exp2}.out
-rm -f ${exp3}.out
-rm -f ${exp2_scale}.out
+if [ $count -eq 0 ]; then
+   rm -f ${exp1}.out
+   rm -f ${exp2}.out
+   rm -f ${exp3}.out
+   rm -f ${exp2_scale}.out
+fi
 
 if [[ "$clean" = ".true."
]]; then
   rm -rf $savdir
diff --git a/regression/regression_test_enkf.sh b/regression/regression_test_enkf.sh
index 213ee726da..f52a5d451f 100755
--- a/regression/regression_test_enkf.sh
+++ b/regression/regression_test_enkf.sh
@@ -34,7 +34,7 @@ maxtime=1200
 maxmem=${maxmem:-3400000} # set in regression_param
 maxmem=$((${memnode:-64}*1024*1024))
 
-# Copy stdout and sanl files
+# Copy stdout and incr files
 # from $savdir to $tmpdir
 list="$exp1 $exp2 $exp3"
 for exp in $list; do
@@ -43,7 +43,7 @@ for exp in $list; do
    imem=1
    while [[ $imem -le $nmem ]]; do
       member="_mem"`printf %03i $imem`
-      $ncp $savdir/$exp/sanl_${global_adate}_fhr06$member $tmpdir/sanl$member.$exp
+      $ncp $savdir/$exp/incr_${global_adate}_fhr06$member $tmpdir/incr$member.$exp
       (( imem = $imem + 1 ))
    done
 done
@@ -282,10 +282,13 @@ nmem=10
 imem=1
 while [[ $imem -le $nmem ]]; do
    member="_mem"`printf %03i $imem`
-   if ! cmp -s sanl$member.${exp1} sanl$member.${exp2}
-then
-   echo 'sanl'$member'.'${exp1}' sanl'$member'.'${exp2}' are NOT identical'
-fi
+   ncdump incr$member.${exp1} > incr$member.${exp1}.out
+   ncdump incr$member.${exp2} > incr$member.${exp2}.out
+   if ! diff incr$member.${exp1}.out incr$member.${exp2}.out > /dev/null 2>&1; then
+      echo 'incr'$member'.'${exp1}' incr'$member'.'${exp2}' are NOT identical'
+   else
+      rm -f incr$member.${exp1}.out incr$member.${exp2}.out
+   fi
    (( imem = $imem + 1 ))
 done
 echo
@@ -379,11 +382,14 @@ else
    imem=1
    while [[ $imem -le $nmem ]]; do
       member="_mem"`printf %03i $imem`
-      if ! cmp -s sanl$member.${exp1} sanl$member.${exp3}
-      then
-         echo 'sanl'$member'.'${exp1}' sanl'$member'.'${exp3}' are NOT identical'
+      ncdump incr$member.${exp1} > incr$member.${exp1}.out
+      ncdump incr$member.${exp3} > incr$member.${exp3}.out
+      if ! diff incr$member.${exp1}.out incr$member.${exp3}.out > /dev/null 2>&1; then
+         echo 'incr'$member'.'${exp1}' incr'$member'.'${exp3}' are NOT identical'
+      else
+         rm -f incr$member.${exp1}.out incr$member.${exp3}.out
       fi
-      (( imem = $imem + 1 ))
+      (( imem = $imem + 1 ))
    done
    echo
 } >> $output
@@ -411,11 +417,20 @@ mkdir -p $vfydir
 $ncp $output $vfydir/
 
+# Final check for any failed tests
+count=$(grep -i "fail" $output |wc -l)
+if [ $count -gt 0 ]; then
+   (( failed_test = $failed_test + $count ))
+fi
+
+# Remove job log files if no failures were detected
 cd $scripts
-rm -f ${exp1}.out
-rm -f ${exp2}.out
-rm -f ${exp3}.out
-rm -f ${exp2_scale}.out
+if [ $count -eq 0 ]; then
+   rm -f ${exp1}.out
+   rm -f ${exp2}.out
+   rm -f ${exp3}.out
+   rm -f ${exp2_scale}.out
+fi
 
 if [[ "$clean" = ".true."
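The regression_test_enkf.sh hunks above switch the member-by-member check from a bitwise cmp -s of the sanl files to an ncdump-then-diff comparison of the new netCDF increment files, presumably so that files whose variables and attributes match still compare equal even when their binary layout differs. A minimal sketch of that pattern with the corrected "if ! diff" test; the file names are illustrative and ncdump is assumed to be on PATH. Note that the first CDL line embeds a dataset name derived from the file name, so it can differ even when the data match; the sketch drops it before comparing.

# Compare two netCDF files via their CDL text dumps; keep the dumps only when they differ.
f1=incr_mem001.exp1        # illustrative file names
f2=incr_mem001.exp2
ncdump $f1 | tail -n +2 > $f1.out
ncdump $f2 | tail -n +2 > $f2.out
if ! diff $f1.out $f2.out > /dev/null 2>&1; then
   echo "$f1 and $f2 are NOT identical"
else
   rm -f $f1.out $f2.out
fi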
]]; then rm -rf $savdir diff --git a/regression/regression_var.sh b/regression/regression_var.sh index 250317f405..98083c8587 100755 --- a/regression/regression_var.sh +++ b/regression/regression_var.sh @@ -33,7 +33,7 @@ if [[ -d /glade ]]; then # Cheyenne export machine="Cheyenne" elif [[ -d /scratch1 ]]; then # Hera export machine="Hera" -elif [[ -d /jetmon ]]; then # Jet +elif [[ -d /mnt/lfs4 || -d /jetmon || -d /mnt/lfs1 ]]; then # Jet export machine="Jet" elif [[ -d /discover ]]; then # NCCS Discover export machine="Discover" @@ -138,19 +138,16 @@ case $machine in export noscrub=/lfs1/NESDIS/nesdis-rdo2/$LOGNAME/noscrub export ptmp=/lfs1/NESDIS/nesdis-rdo2/$LOGNAME/ptmp - export fixcrtm="/lfs1/NESDIS/nesdis-rdo2/David.Huber/save/CRTM_REL-2.2.3/crtm_v2.2.3/fix_update" - export casesdir="/lfs1/NESDIS/nesdis-rdo2/David.Huber/save/CASES" + export casesdir="/lfs1/NESDIS/nesdis-rdo2/David.Huber/save/CASES/regtest" export check_resource="no" export accnt="nesdis-rdo2" export group="global" export queue="batch" if [[ "$cmaketest" = "false" ]]; then - export basedir="/lfs1/NESDIS/nesdis-rdo2/$LOGNAME/gsi" + export basedir="/lfs1/NESDIS/nesdis-rdo2/$LOGNAME/save/git/gsi" fi - export ptmp="/lfs1/NESDIS/nesdis-rdo2/$LOGNAME/ptmp/$ptmpName" - # On Jet, there are no scrubbers to remove old contents from stmp* directories. # After completion of regression tests, will remove the regression test subdirecories export clean=".true." diff --git a/src/enkf/gridinfo_fv3reg.f90 b/src/enkf/gridinfo_fv3reg.f90 index 53e5f5b3de..ef5b242901 100644 --- a/src/enkf/gridinfo_fv3reg.f90 +++ b/src/enkf/gridinfo_fv3reg.f90 @@ -43,7 +43,8 @@ module gridinfo ! !$$$ -use mpisetup, only: nproc, mpi_integer, mpi_real4, mpi_comm_world,mpi_status +use mpisetup, only: nproc, mpi_integer, mpi_real4,mpi_status +use mpimod, only: mpi_comm_world use params, only: datapath,nlevs,nlons,nlats,use_gfs_nemsio, fgfileprefixes, & fv3fixpath, nx_res,ny_res, ntiles,l_fv3reg_filecombined,paranc, & fv3_io_layout_nx,fv3_io_layout_ny diff --git a/src/gsi/apply_scaledepwgts.f90 b/src/gsi/apply_scaledepwgts.f90 new file mode 100644 index 0000000000..62c455e011 --- /dev/null +++ b/src/gsi/apply_scaledepwgts.f90 @@ -0,0 +1,173 @@ +!$$$ program documentation block +! +! program history: +! +! 2018-03-28 T. Lei and D. Kleist - consoliated and added codes +! for the scale dependent scale localization scheme +! +!$$$ end documentation block + +function fwgtofwvlen (rvlft,rvrgt,rcons,rlen,rinput) +!$$$ subprogram documentation block +! +! subprogram: fwgtofwvlen +! +! abstract: Calculation of spectral filter functions +! +!$$$ end documentation block + + use kinds, only: r_kind,i_kind,r_single + implicit none + + real(r_kind),intent(in) :: rvlft,rvrgt,rcons,rlen,rinput + real(r_kind) :: fwgtofwvlen + real(r_kind) :: rlen1,rtem1,rconshalf + + rlen1=rlen/10.0_r_kind ! rlen corresponds to a (-5,5) region + rconshalf=0.5_r_kind*rcons + if(rinput > rvlft .and. rinput < rvrgt) then + fwgtofwvlen=rcons + else + rtem1=min(abs(rinput-rvlft),abs(rinput-rvrgt)) + fwgtofwvlen=rconshalf*(1.0_r_kind+tanh(5.0_r_kind-rtem1/rlen1)) + endif + +end function fwgtofwvlen +! . . . . +subroutine init_mult_spc_wgts(jcap_in) +!$$$ subprogram documentation block +! +! subprogram: init_mult_spc_wgts +! 
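fwgtofwvlen above is the spectral filter weight: inside the wavelength band (rvlft, rvrgt) it returns the full weight rcons, and outside the band it decays through a tanh ramp whose width is set by rlen/10. init_mult_spc_wgts, continuing below, evaluates it for every total wavenumber (wavelength 2*pi*rearth/n in km) and assigns scale group 2 the residual, so the group weights, or their squares when i_ensloccov4scl == 1, sum to one. A standalone sketch of the taper with illustrative band edges and taper length:

! Standalone sketch of the band-pass weight in fwgtofwvlen (values are illustrative).
! Inside (rvlft,rvrgt) the weight is rcons; outside it decays through a tanh ramp of width ~rlen/10.
program demo_fwgt
  implicit none
  integer, parameter :: r_kind = selected_real_kind(15)
  real(r_kind) :: rvlft, rvrgt, rcons, rlen, wvl(3)
  integer :: i
  rvlft = 3000._r_kind   ! left edge of the pass band (km)
  rvrgt = 40000._r_kind  ! right edge of the pass band (km)
  rcons = 1._r_kind      ! weight inside the band
  rlen  = 500._r_kind    ! taper length scale; ramp half-width is rlen/10
  wvl   = [2000._r_kind, 10000._r_kind, 42000._r_kind]
  do i = 1, 3
     print '(a,f9.1,a,f6.3)', 'wavelength ', wvl(i), ' km -> weight ', fwgt(wvl(i))
  end do
contains
  real(r_kind) function fwgt(rinput)
    real(r_kind), intent(in) :: rinput
    real(r_kind) :: rtem1
    if (rinput > rvlft .and. rinput < rvrgt) then
       fwgt = rcons
    else
       rtem1 = min(abs(rinput-rvlft), abs(rinput-rvrgt))
       fwgt  = 0.5_r_kind*rcons*(1._r_kind + tanh(5._r_kind - rtem1/(rlen/10._r_kind)))
    end if
  end function fwgt
end program demo_fwgt

Compiled with any Fortran compiler, the three sample wavelengths print weights near 0, 1 and 0, i.e. a smooth band-pass in wavelength.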
+!$$$ end documentation block + + use kinds, only: r_kind,i_kind,r_single + use hybrid_ensemble_parameters,only: s_ens_hv,sp_loc,grd_ens,grd_loc,sp_ens + use hybrid_ensemble_parameters,only: n_ens,p_sploc2ens,grd_sploc + use hybrid_ensemble_parameters,only: use_localization_grid + use gridmod,only: use_sp_eqspace + use general_specmod, only: general_init_spec_vars + use constants, only: zero,half,one,two,three,rearth,pi + use constants, only: rad2deg + use mpimod, only: mype + use general_sub2grid_mod, only: general_sub2grid_create_info + use egrid2agrid_mod,only: g_create_egrid2agrid + use general_sub2grid_mod, only: sub2grid_info + use gsi_io, only: verbose + use hybrid_ensemble_parameters, only: nsclgrp + use hybrid_ensemble_parameters, only: spc_multwgt,spcwgt_params,i_ensloccov4scl + implicit none + + integer(i_kind),intent(in ) :: jcap_in + real(r_kind),allocatable :: totwvlength(:) + + integer(i_kind) i,ii,j,k,l,n,kk,nsigend + integer(i_kind) ig + real(r_kind) rwv0,rtem1,rtem2 + real (r_kind):: fwgtofwvlen + integer(i_kind) :: l_sum_spc_weights + + ! Spectral scale decomposition is differernt between SDL-cross and SDL-nocross + if( i_ensloccov4scl == 1 )then + l_sum_spc_weights = 1 + else + l_sum_spc_weights = 0 + end if + + allocate(totwvlength(jcap_in)) + + rwv0=2*pi*rearth*0.001_r_kind + do i=1,jcap_in + totwvlength(i)= rwv0/real(i) + enddo + do i=1,jcap_in + rtem1=0 + do ig=1,nsclgrp + if(ig /= 2) then + spc_multwgt(i,ig)=fwgtofwvlen(spcwgt_params(1,ig),spcwgt_params(2,ig),& + spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength(i)) + if(l_sum_spc_weights == 0 ) then + rtem1=rtem1+spc_multwgt(i,ig) + else + rtem1=rtem1+spc_multwgt(i,ig)*spc_multwgt(i,ig) + endif + endif + enddo + rtem2 =1.0_r_kind - rtem1 + if(abs(rtem2) >= zero) then + + if(l_sum_spc_weights == 0 ) then + spc_multwgt(i,2)=rtem2 + else + spc_multwgt(i,2)=sqrt(rtem2) + endif + endif + enddo + spc_multwgt=max(spc_multwgt,0.0_r_kind) + + deallocate(totwvlength) + return +end subroutine init_mult_spc_wgts + +subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2) +! +! Program history log: +! 2017-03-30 J. Kay, X. Wang - copied from Kleist's apply_scaledepwgts and +! add the calculation of scale-dependent weighting for mixed resolution ensemble +! POC: xuguang.wang@ou.edu +! + use constants, only: one + use control_vectors, only: nrf_var,cvars2d,cvars3d,control_vector + use kinds, only: r_kind,i_kind + use kinds, only: r_single + use mpimod, only: mype,nvar_id,levs_id + use hybrid_ensemble_parameters, only: oz_univ_static + use general_specmod, only: general_spec_multwgt + use gsi_bundlemod, only: gsi_bundle + use general_sub2grid_mod, only: general_sub2grid,general_grid2sub + use general_specmod, only: spec_vars + use general_sub2grid_mod, only: sub2grid_info + use mpimod, only: mpi_comm_world,mype,npe,ierror + use file_utility, only : get_lun + implicit none + +! Declare passed variables + type(gsi_bundle),intent(in) :: wbundle + type(gsi_bundle),intent(inout) :: wbundle2 + type(spec_vars),intent (in):: sp_in + type(sub2grid_info),intent(in)::grd_in + real(r_kind),dimension(0:sp_in%jcap),intent(in):: spwgts + +! Declare local variables + integer(i_kind) ii,kk + integer(i_kind) i,j,lunit + + real(r_kind),dimension(grd_in%lat2,grd_in%lon2):: slndt,sicet,sst + real(r_kind),dimension(grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc) :: hwork + real(r_kind),dimension(grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc) :: work + real(r_kind),dimension(sp_in%nc):: spc1 + character*64 :: fname1 + character*5:: varname1 + +! 
Beta1 first +! Get from subdomains to + call general_sub2grid(grd_in,wbundle%values,hwork) + work=reshape(hwork,(/grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc/)) + + do kk=1,grd_in%nlevs_alloc +! Transform from physical space to spectral space + call general_g2s0(grd_in,sp_in,spc1,work(:,:,kk)) + +! Apply spectral weights + call general_spec_multwgt(sp_in,spc1,spwgts) +! Transform back to physical space + call general_s2g0(grd_in,sp_in,spc1,work(:,:,kk)) + + end do + +! Transfer work back to subdomains + hwork=reshape(work,(/grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc/)) + call general_grid2sub(grd_in,hwork,wbundle2%values) + + return +end subroutine apply_scaledepwgts diff --git a/src/gsi/cplr_get_fv3_regional_ensperts.f90 b/src/gsi/cplr_get_fv3_regional_ensperts.f90 index f99ca39790..6e94b29c6c 100644 --- a/src/gsi/cplr_get_fv3_regional_ensperts.f90 +++ b/src/gsi/cplr_get_fv3_regional_ensperts.f90 @@ -8,14 +8,18 @@ module get_fv3_regional_ensperts_mod procedure, pass(this) :: get_fv3_regional_ensperts => get_fv3_regional_ensperts_run procedure, pass(this) :: ens_spread_dualres_regional => ens_spread_dualres_regional_fv3_regional procedure, pass(this) :: general_read_fv3_regional + procedure, pass(this) :: general_read_fv3_regional_parallel_over_ens + procedure, pass(this) :: parallel_read_fv3_step2 + procedure, nopass :: fill_regional_2d end type get_fv3_regional_ensperts_class - type(sub2grid_info):: grd_fv3lam_ens_dynvar_io_nouv,grd_fv3lam_ens_tracer_io_nouv,grd_fv3lam_ens_uv + type(sub2grid_info):: grd_fv3lam_ens_dynvar_io_nouv,grd_fv3lam_ens_tracer_io_nouv,grd_fv3lam_ens_uv,grd_fv3lam_ens_phyvar_io_nouv character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_ens_io_dynmetvars3d_nouv ! copy of cvars3d excluding uv 3-d fields character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_ens_io_tracermetvars3d_nouv ! 
copy of cvars3d excluding uv 3-d fields character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_ens_io_dynmetvars2d_nouv character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_ens_io_tracermetvars2d_nouv + character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_ens_io_phymetvars3d_nouv contains subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) @@ -46,8 +50,8 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) use kinds, only: r_kind,i_kind,r_single use constants, only: zero,one,half,zero_single,rd_over_cp,one_tenth - use mpimod, only: mpi_comm_world,ierror,mype - use hybrid_ensemble_parameters, only: n_ens,grd_ens + use mpimod, only: mpi_comm_world,ierror,mype,npe + use hybrid_ensemble_parameters, only: n_ens,grd_ens,parallelization_over_ensmembers use hybrid_ensemble_parameters, only: l_both_fv3sar_gfs_ens, n_ens_gfs,n_ens_fv3sar use hybrid_ensemble_parameters, only: ntlevs_ens,ensemble_path use control_vectors, only: cvars2d,cvars3d,nc2d,nc3d @@ -69,6 +73,8 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) use gsi_rfv3io_mod, only: fv3lam_io_dynmetvars2d_nouv,fv3lam_io_tracermetvars2d_nouv use netcdf , only: nf90_open, nf90_close,nf90_nowrite,nf90_inquire,nf90_format_netcdf4 use netcdf_mod , only: nc_check + use gsi_rfv3io_mod, only: fv3lam_io_phymetvars3d_nouv + use obsmod, only: if_model_dbz implicit none @@ -79,7 +85,11 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig):: u,v,tv,oz,rh real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2):: ps - real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)::w,ql,qi,qr,qg,qs,qnr + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)::w,ql,qi,qr,qg,qs,qnr,dbz + real(r_kind),dimension(:,:,:),allocatable :: gg_u,gg_v,gg_tv,gg_rh + real(r_kind),dimension(:,:,:),allocatable :: gg_w,gg_dbz,gg_qr,gg_qs, & + gg_qi,gg_qg,gg_oz,gg_cwmr + real(r_kind),dimension(:,:),allocatable :: gg_ps real(r_single),pointer,dimension(:,:,:):: w3 =>NULL() real(r_single),pointer,dimension(:,:):: w2 =>NULL() @@ -96,9 +106,9 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) integer(i_kind):: i,j,k,n,mm1,istatus integer(i_kind):: ndynvario2d,ntracerio2d - integer(r_kind):: ndynvario3d,ntracerio3d + integer(r_kind):: ndynvario3d,ntracerio3d,nphyvario3d integer(i_kind):: inner_vars,numfields - integer(i_kind):: ilev,ic2,ic3 + integer(i_kind):: ilev,ic2,ic3,iope integer(i_kind):: m integer(i_kind)::loc_id,ncfmt @@ -125,8 +135,10 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) !clt setup varnames for IO ndynvario2d=0 ntracerio2d=0 + nphyvario3d=0 ndynvario3d=size(fv3lam_io_dynmetvars3d_nouv) ntracerio3d=size(fv3lam_io_tracermetvars3d_nouv) + nphyvario3d=size(fv3lam_io_phymetvars3d_nouv) if (allocated(fv3lam_io_dynmetvars2d_nouv))then ndynvario2d=size(fv3lam_io_dynmetvars2d_nouv) endif @@ -137,6 +149,10 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) allocate(fv3lam_ens_io_tracermetvars3d_nouv(ndynvario3d)) fv3lam_ens_io_dynmetvars3d_nouv=fv3lam_io_dynmetvars3d_nouv fv3lam_ens_io_tracermetvars3d_nouv=fv3lam_io_tracermetvars3d_nouv + if ( nphyvario3d > 0 )then + allocate(fv3lam_ens_io_phymetvars3d_nouv(nphyvario3d)) + fv3lam_ens_io_phymetvars3d_nouv=fv3lam_io_phymetvars3d_nouv + end if if (ndynvario2d > 0 ) then allocate(fv3lam_ens_io_dynmetvars2d_nouv(ndynvario2d)) 
fv3lam_ens_io_dynmetvars2d_nouv=fv3lam_io_dynmetvars2d_nouv @@ -168,6 +184,24 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) call general_sub2grid_create_info(grd_fv3lam_ens_dynvar_io_nouv,inner_vars,grd_ens%nlat,& grd_ens%nlon,grd_ens%nsig,numfields,regional,names=names,lnames=lnames) + if( nphyvario3d > 0 )then + inner_vars=1 + numfields=inner_vars*(nphyvario3d*grd_ens%nsig) + deallocate(lnames,names) + allocate(lnames(1,numfields),names(1,numfields)) + ilev=1 + do i=1,nphyvario3d + do k=1,grd_ens%nsig + lnames(1,ilev)=k + names(1,ilev)=fv3lam_ens_io_phymetvars3d_nouv(i) + ilev=ilev+1 + enddo + enddo + + call general_sub2grid_create_info(grd_fv3lam_ens_phyvar_io_nouv,inner_vars,grd_ens%nlat,& + grd_ens%nlon,grd_ens%nsig,numfields,regional,names=names,lnames=lnames) + end if + inner_vars=1 numfields=inner_vars*(ntracerio3d*grd_ens%nsig+ntracerio2d) deallocate(lnames,names) @@ -254,16 +288,73 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) mm1=mype+1 kap1=rd_over_cp+one kapr=one/rd_over_cp + + if( parallelization_over_ensmembers ) then + if(n_ens_fv3sar>npe) then + parallelization_over_ensmembers=.false. +130 format('Disabling parallelization_over_ensmembers because number of ensemble members (',I0,') is greater than number of MPI ranks (',I0,').') + if(mype==0) then + write(6,130) n_ens_fv3sar,npe + endif + endif + endif ! parallelization_over_ensmembers + + if(parallelization_over_ensmembers .and. mype==0) then + write(6,'(I0,A)') mype,': will read ensemble data in parallel (parallelization_over_ensmembers=.true.)' + endif + + if( parallelization_over_ensmembers )then + do n=1,n_ens_fv3sar + write(ensfilenam_str,22) trim(adjustl(ensemble_path)),ens_fhrlevs(m),n +22 format(a,'fv3SAR',i2.2,'_ens_mem',i3.3) + iope=(n-1)*npe/n_ens_fv3sar + ! DEFINE INPUT FILE NAME + fv3_filename%grid_spec=trim(ensfilenam_str)//'-fv3_grid_spec' + fv3_filename%ak_bk=trim(ensfilenam_str)//'-fv3_akbk' + fv3_filename%dynvars=trim(ensfilenam_str)//'-fv3_dynvars' + fv3_filename%tracers=trim(ensfilenam_str)//"-fv3_tracer" + fv3_filename%phyvars=trim(ensfilenam_str)//'-fv3_phyvars' + fv3_filename%sfcdata=trim(ensfilenam_str)//"-fv3_sfcdata" + fv3_filename%couplerres=trim(ensfilenam_str)//"-coupler.res" + + + if( mype==iope) then + allocate(gg_u(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_v(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_tv(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_rh(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_oz(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_ps(grd_ens%nlat,grd_ens%nlon)) + if ( .not. 
if_model_dbz ) then + call this%general_read_fv3_regional_parallel_over_ens(iope,fv3_filename,gg_ps,gg_u,gg_v,gg_tv,gg_rh,gg_oz) + else + allocate(gg_w(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_dbz(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_qr(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_qs(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_qi(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_qg(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + allocate(gg_cwmr(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig)) + call this%general_read_fv3_regional_parallel_over_ens(iope,fv3_filename,gg_ps,gg_u,gg_v,gg_tv,gg_rh,gg_oz, & + g_ql=gg_cwmr,g_qi=gg_qi,g_qr=gg_qr,g_qs=gg_qs,g_qg=gg_qg,g_w=gg_w,g_dbz=gg_dbz) + end if + end if + end do + if(mype==0) then + write(6,'(I0,A)') mype,': reading ensemble data in parallel is done (parallelization_over_ensmembers=.true.)' + endif + end if + call MPI_Barrier(mpi_comm_world,ierror) ! ! LOOP OVER ENSEMBLE MEMBERS do n_fv3sar=1,n_ens_fv3sar n=n_ens_gfs+n_fv3sar write(ensfilenam_str,22) trim(adjustl(ensemble_path)),ens_fhrlevs(m),n_fv3sar -22 format(a,'fv3SAR',i2.2,'_ens_mem',i3.3) ! DEFINE INPUT FILE NAME fv3_filename%grid_spec=trim(ensfilenam_str)//'-fv3_grid_spec' !exmaple thinktobe fv3_filename%ak_bk=trim(ensfilenam_str)//'-fv3_akbk' fv3_filename%dynvars=trim(ensfilenam_str)//'-fv3_dynvars' + fv3_filename%phyvars=trim(ensfilenam_str)//'-fv3_phyvars' fv3_filename%tracers=trim(ensfilenam_str)//"-fv3_tracer" fv3_filename%sfcdata=trim(ensfilenam_str)//"-fv3_sfcdata" fv3_filename%couplerres=trim(ensfilenam_str)//"-coupler.res" @@ -299,15 +390,52 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) endif ! ! READ ENEMBLE MEMBERS DATA - if (mype == 0) write(6,'(a,a)') & - 'CALL READ_FV3_REGIONAL_ENSPERTS FOR ENS DATA with the filename str : ',trim(ensfilenam_str) - if (.not.l_use_dbz_directDA ) then ! Read additional hydrometers and w for dirZDA - call this%general_read_fv3_regional(fv3_filename,ps,u,v,tv,rh,oz) - else - call this%general_read_fv3_regional(fv3_filename,ps,u,v,tv,rh,oz, & - g_ql=ql,g_qi=qi,g_qr=qr,g_qs=qs,g_qg=qg,g_qnr=qnr,g_w=w) + if( .not. parallelization_over_ensmembers )then + if (mype == 0) write(6,'(a,a)') & + 'CALL READ_FV3_REGIONAL_ENSPERTS FOR ENS DATA with the filename str : ',trim(ensfilenam_str) + if (.not. (l_use_dbz_directDA .or. if_model_dbz) ) then ! Read additional hydrometers and w for dirZDA + call this%general_read_fv3_regional(fv3_filename,ps,u,v,tv,rh,oz) + else + if( l_use_dbz_directDA ) then + call this%general_read_fv3_regional(fv3_filename,ps,u,v,tv,rh,oz, & + g_ql=ql,g_qi=qi,g_qr=qr,g_qs=qs,g_qg=qg,g_qnr=qnr,g_w=w) + else if( if_model_dbz )then + call this%general_read_fv3_regional(fv3_filename,ps,u,v,tv,rh,oz, & + g_ql=ql,g_qi=qi,g_qr=qr,g_qs=qs,g_qg=qg,g_qnr=qnr,g_w=w,g_dbz=dbz) + end if + end if end if + if( parallelization_over_ensmembers )then + iope=(n_fv3sar-1)*npe/n_ens_fv3sar + if(mype==iope) then + write(0,'(I0,A,I0,A)') mype,': scatter member ',n_fv3sar,' to other ranks...' 
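With parallelization_over_ensmembers, each FV3-LAM member above is read in full by one dedicated rank, iope = (n-1)*npe/n_ens_fv3sar, and the option is disabled when the members outnumber the MPI ranks, presumably so that no two members share a reader. A toy print-out of that mapping with illustrative rank and ensemble counts:

! Sketch of the member -> reader-rank mapping used above (npe and ensemble size are illustrative).
program demo_iope
  implicit none
  integer :: n, npe, n_ens_fv3sar, iope
  npe = 40            ! illustrative number of MPI ranks
  n_ens_fv3sar = 10   ! illustrative ensemble size
  do n = 1, n_ens_fv3sar
     iope = (n-1)*npe/n_ens_fv3sar   ! same integer-division formula as in the loop above
     print '(a,i3,a,i3)', 'member ', n, ' is read by rank ', iope
  end do
end program demo_iope

With 40 ranks and 10 members the readers are ranks 0, 4, 8, ..., 36, spreading the ensemble I/O across the machine.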
+ if( if_model_dbz )then + call this%parallel_read_fv3_step2(mype,iope,& + g_ps=ps,g_u=u,g_v=v,g_tv=tv,g_rh=rh,g_ql=ql,& + g_oz=oz,g_w=w,g_qr=qr,g_qs=qs,g_qi=qi,g_qg=qg,g_dbz=dbz,& + gg_ps=gg_ps,gg_tv=gg_tv,gg_u=gg_u,gg_v=gg_v,& + gg_rh=gg_rh,gg_w=gg_w,gg_dbz=gg_dbz,gg_qr=gg_qr,& + gg_qs=gg_qs,gg_qi=gg_qi,gg_qg=gg_qg,gg_ql=gg_cwmr) + else + call this%parallel_read_fv3_step2(mype,iope,& + g_ps=ps,g_u=u,g_v=v,g_tv=tv,g_rh=rh,g_ql=ql,g_oz=oz, & + gg_ps=gg_ps,gg_tv=gg_tv,gg_u=gg_u,gg_v=gg_v,gg_rh=gg_rh) + end if + else + if( if_model_dbz )then + call this%parallel_read_fv3_step2(mype,iope,& + g_ps=ps,g_u=u,g_v=v,g_tv=tv,g_rh=rh,g_ql=ql,& + g_oz=oz,g_w=w,g_qr=qr,g_qs=qs,g_qi=qi,g_qg=qg,g_dbz=dbz) + else + call this%parallel_read_fv3_step2(mype,iope,& + g_ps=ps,g_u=u,g_v=v,g_tv=tv,g_rh=rh,g_ql=ql,g_oz=oz) + endif + endif + + call MPI_Barrier(mpi_comm_world,ierror) + end if + ! SAVE ENSEMBLE MEMBER DATA IN COLUMN VECTOR do ic3=1,nc3d @@ -463,6 +591,16 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) end do end do end do + + case('dbz','DBZ') + do k=1,grd_ens%nsig + do i=1,grd_ens%lon2 + do j=1,grd_ens%lat2 + w3(j,i,k) = dbz(j,i,k) + x3(j,i,k)=x3(j,i,k)+dbz(j,i,k) + end do + end do + end do end select @@ -572,7 +710,7 @@ subroutine get_fv3_regional_ensperts_run(this,en_perts,nelen,ps_bar) end subroutine get_fv3_regional_ensperts_run subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g_rh,g_oz, & - g_ql,g_qi,g_qr,g_qs,g_qg,g_qnr,g_w) + g_ql,g_qi,g_qr,g_qs,g_qg,g_qnr,g_w,g_dbz) !$$$ subprogram documentation block ! first compied from general_read_arw_regional . . . . ! subprogram: general_read_fv3_regional read fv3sar model ensemble members @@ -623,7 +761,7 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g use hybrid_ensemble_parameters, only: grd_ens use directDA_radaruse_mod, only: l_use_cvpqx, cvpqx_pval, cld_nt_updt use directDA_radaruse_mod, only: l_cvpnr, cvpnr_pval - + use obsmod, only:if_model_dbz implicit none @@ -632,7 +770,7 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g class(get_fv3_regional_ensperts_class), intent(inout) :: this type (type_fv3regfilenameg) , intent (in) :: fv3_filenameginput real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),intent(out)::g_u,g_v,g_tv,g_rh,g_oz - real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),optional,intent(out)::g_ql,g_qi,g_qr + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),optional,intent(out)::g_ql,g_qi,g_qr,g_dbz real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),optional,intent(out)::g_qs,g_qg,g_qnr,g_w real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2),intent(out):: g_ps @@ -656,11 +794,13 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g character(len=24),parameter :: myname_ = 'general_read_fv3_regional' type(gsi_bundle) :: gsibundle_fv3lam_ens_dynvar_nouv type(gsi_bundle) :: gsibundle_fv3lam_ens_tracer_nouv + type(gsi_bundle) :: gsibundle_fv3lam_ens_phyvar_nouv type(gsi_grid):: grid_ens character(len=:),allocatable :: grid_spec !='fv3_grid_spec' character(len=:),allocatable :: ak_bk !='fv3_akbk' character(len=:),allocatable :: dynvars !='fv3_dynvars' + character(len=:),allocatable :: phyvars !='fv3_phyvars' character(len=:),allocatable :: tracers !='fv3_tracer' character(len=:),allocatable :: sfcdata !='fv3_sfcdata' character(len=:),allocatable :: couplerres!='coupler.res' @@ -676,6 +816,7 @@ subroutine 
general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g grid_spec=fv3_filenameginput%grid_spec ak_bk=fv3_filenameginput%ak_bk dynvars=fv3_filenameginput%dynvars + phyvars=fv3_filenameginput%phyvars tracers=fv3_filenameginput%tracers sfcdata=fv3_filenameginput%sfcdata couplerres=fv3_filenameginput%couplerres @@ -701,7 +842,10 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g endif - + if(allocated(fv3lam_ens_io_phymetvars3d_nouv))then + call gsi_bundlecreate(gsibundle_fv3lam_ens_phyvar_nouv,grid_ens,'gsibundle_fv3lam_ens_phyvar_nouv',istatus, & + names3d=fv3lam_ens_io_phymetvars3d_nouv) + end if if(fv3sar_ensemble_opt == 0 ) then @@ -714,6 +858,10 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g fv3_filenameginput%dynvars,fv3_filenameginput) call gsi_fv3ncdf_read(grd_fv3lam_ens_tracer_io_nouv,gsibundle_fv3lam_ens_tracer_nouv,& fv3_filenameginput%tracers,fv3_filenameginput) + if( if_model_dbz ) then + call gsi_fv3ncdf_read(grd_fv3lam_ens_phyvar_io_nouv,gsibundle_fv3lam_ens_phyvar_nouv,& + fv3_filenameginput%phyvars,fv3_filenameginput) + end if else call gsi_fv3ncdf_read_v1(grd_fv3lam_ens_dynvar_io_nouv,gsibundle_fv3lam_ens_dynvar_nouv,& fv3_filenameginput%dynvars,fv3_filenameginput) @@ -724,14 +872,17 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_dynvar_nouv, 'tsen' ,g_tsen ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'q' ,g_q ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'oz' ,g_oz ,istatus );ier=ier+istatus - if (l_use_dbz_directDA) then + if (l_use_dbz_directDA .or. if_model_dbz) then call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'ql' ,g_ql ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'qi' ,g_qi ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'qr' ,g_qr ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'qs' ,g_qs ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'qg' ,g_qg ,istatus );ier=ier+istatus + if (l_use_dbz_directDA) & call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_tracer_nouv, 'qnr',g_qnr ,istatus );ier=ier+istatus call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_dynvar_nouv, 'w' , g_w ,istatus );ier=ier+istatus + if( if_model_dbz )& + call GSI_Bundlegetvar ( gsibundle_fv3lam_ens_phyvar_nouv, 'dbz' , g_dbz ,istatus );ier=ier+istatus end if @@ -834,11 +985,339 @@ subroutine general_read_fv3_regional(this,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g enddo call gsi_bundledestroy(gsibundle_fv3lam_ens_dynvar_nouv) call gsi_bundledestroy(gsibundle_fv3lam_ens_tracer_nouv) - + call gsi_bundledestroy(gsibundle_fv3lam_ens_phyvar_nouv) return end subroutine general_read_fv3_regional + subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenameginput,g_ps,g_u,g_v,g_tv,g_rh,g_oz, & + g_ql,g_qi,g_qr,g_qs,g_qg,g_qnr,g_w,g_dbz) + !$$$ subprogram documentation block + ! first compied from general_read_arw_regional . . . . + ! subprogram: general_read_fv3_regional read fv3sar model ensemble members + ! prgmmr: Ting org: emc/ncep date: 2018 + ! + ! abstract: read ensemble members from the fv3sar model in "restart" or "cold start" netcdf format + ! for use with hybrid ensemble option. + ! + ! program history log: + ! 2018- Ting - intial versions + ! 2022-04-01 Y. Wang and X. 
Wang - read all fields for each member for + ! parallel ensemble IO capability + ! poc: xuguang.wang@ou.edu + ! + ! input argument list: + ! + ! output argument list: + ! + ! attributes: + ! language: f90 + ! machine: ibm RS/6000 SP + ! + !$$$ end documentation block + + use netcdf, only: nf90_nowrite + use netcdf, only: nf90_open,nf90_close + use netcdf, only: nf90_inq_dimid,nf90_inquire_dimension + use netcdf, only: nf90_inq_varid,nf90_inquire_variable,nf90_get_var + use kinds, only: r_kind,r_single,i_kind + use gridmod, only: eta1_ll,eta2_ll + use constants, only: zero,one,fv,zero_single,one_tenth,h300 + use hybrid_ensemble_parameters, only: grd_ens,q_hyb_ens + use hybrid_ensemble_parameters, only: fv3sar_ensemble_opt + + use mpimod, only: mpi_comm_world,mpi_rtype,mype + use netcdf_mod, only: nc_check + use gsi_rfv3io_mod,only: type_fv3regfilenameg + use gsi_rfv3io_mod,only:n2d + use constants, only: half,zero + use gsi_rfv3io_mod, only: gsi_fv3ncdf_read + use gsi_rfv3io_mod, only: gsi_fv3ncdf_read_v1 + use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv + use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv_v1 + use gsi_rfv3io_mod, only: gsi_fv3ncdf2d_read_v1 + use directDA_radaruse_mod, only: l_use_dbz_directDA + use gsi_bundlemod, only: gsi_gridcreate + use gsi_bundlemod, only: gsi_grid + use gsi_bundlemod, only: gsi_bundlecreate,gsi_bundledestroy + use gsi_bundlemod, only: gsi_bundlegetvar + use obsmod, only: if_model_dbz + use directDA_radaruse_mod, only: l_use_cvpqx, cvpqx_pval, cld_nt_updt + use directDA_radaruse_mod, only: l_cvpnr, cvpnr_pval + use gsi_rfv3io_mod, only: gsi_fv3ncdf_read_ens_parallel_over_ens,gsi_fv3ncdf_readuv_ens_parallel_over_ens + + + + implicit none +! +! Declare passed variables + class(get_fv3_regional_ensperts_class), intent(inout) :: this + integer(i_kind), intent (in) :: iope + type (type_fv3regfilenameg) , intent (in) :: fv3_filenameginput + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig),intent(out)::g_u,g_v,g_tv,g_rh,g_oz + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig),optional,intent(out)::g_ql,g_qi,g_qr,g_dbz + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig),optional,intent(out)::g_qs,g_qg,g_qnr,g_w + + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon),intent(out):: g_ps + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig+1) ::g_prsi + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig) ::g_prsl ,g_tsen,g_q,g_delp +! +! Declare local parameters + real(r_kind),parameter:: r0_01 = 0.01_r_kind + real(r_kind),parameter:: r10 = 10.0_r_kind + real(r_kind),parameter:: r100 = 100.0_r_kind + ! +! Declare local variables + + integer(i_kind):: i,j,k,kp + + integer(i_kind) iderivative + + + logical ice + + character(len=24),parameter :: myname_ = 'general_read_fv3_regional' + + character(len=:),allocatable :: grid_spec !='fv3_grid_spec' + character(len=:),allocatable :: ak_bk !='fv3_akbk' + character(len=:),allocatable :: dynvars !='fv3_dynvars' + character(len=:),allocatable :: phyvars !='fv3_phyvars' + character(len=:),allocatable :: tracers !='fv3_tracer' + character(len=:),allocatable :: sfcdata !='fv3_sfcdata' + character(len=:),allocatable :: couplerres!='coupler.res' + integer (i_kind) ier,istatus + + + associate( this => this ) ! 
eliminates warning for unused dummy argument needed for binding + end associate + + if( mype == iope )then + grid_spec=fv3_filenameginput%grid_spec + ak_bk=fv3_filenameginput%ak_bk + dynvars=fv3_filenameginput%dynvars + phyvars=fv3_filenameginput%phyvars + tracers=fv3_filenameginput%tracers + sfcdata=fv3_filenameginput%sfcdata + couplerres=fv3_filenameginput%couplerres + + if(fv3sar_ensemble_opt == 0 ) then + call gsi_fv3ncdf_readuv_ens_parallel_over_ens(g_u,g_v,fv3_filenameginput,iope) + else + write(6,*) "Warning: we can only grab fields from restart files not cold start files for ensemble!" + endif + + if(fv3sar_ensemble_opt == 0) then + if (if_model_dbz) then + call gsi_fv3ncdf_read_ens_parallel_over_ens(fv3_filenameginput%dynvars,fv3_filenameginput,delp=g_delp,tsen=g_tsen,w=g_w,iope=iope) + call gsi_fv3ncdf_read_ens_parallel_over_ens(fv3_filenameginput%tracers,fv3_filenameginput,q=g_q,oz=g_oz,ql=g_ql,qr=g_qr,& + qs=g_qs,qi=g_qi,qg=g_qg,iope=iope) + call gsi_fv3ncdf_read_ens_parallel_over_ens(fv3_filenameginput%phyvars,fv3_filenameginput,dbz=g_dbz,iope=iope) + else + call gsi_fv3ncdf_read_ens_parallel_over_ens(fv3_filenameginput%dynvars,fv3_filenameginput,delp=g_delp,tsen=g_tsen,iope=iope) + call gsi_fv3ncdf_read_ens_parallel_over_ens(fv3_filenameginput%tracers,fv3_filenameginput,q=g_q,oz=g_oz,iope=iope) + end if + else + write(6,*) "Warning: we can only grab fields from restart files not cold start files for ensemble!" + endif + + + if (fv3sar_ensemble_opt == 0) then + g_prsi(:,:,grd_ens%nsig+1)=eta1_ll(grd_ens%nsig+1) !thinkto be done , should use eta1_ll from ensemble grid + do i=grd_ens%nsig,1,-1 + g_prsi(:,:,i)=g_delp(:,:,i)*0.001_r_kind+g_prsi(:,:,i+1) + enddo + g_ps(:,:)=g_prsi(:,:,1) + + endif + + !! tsen2tv !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + do k=1,grd_ens%nsig + do j=1,grd_ens%nlon + do i=1,grd_ens%nlat + g_tv(i,j,k)=g_tsen(i,j,k)*(one+fv*g_q(i,j,k)) + enddo + enddo + enddo + if (.not.q_hyb_ens) then + ice=.true. + iderivative=0 + do k=1,grd_ens%nsig + kp=k+1 + do j=1,grd_ens%nlon + do i=1,grd_ens%nlat + g_prsl(i,j,k)=(g_prsi(i,j,k)+g_prsi(i,j,kp))*half + end do + end do + end do + call genqsat(g_rh,g_tsen(1,1,1),g_prsl(1,1,1),grd_ens%nlat,grd_ens%nlon,grd_ens%nsig,ice,iderivative) + do k=1,grd_ens%nsig + do j=1,grd_ens%nlon + do i=1,grd_ens%nlat + g_rh(i,j,k) = g_q(i,j,k)/g_rh(i,j,k) + end do + end do + end do + else + do k=1,grd_ens%nsig + do j=1,grd_ens%nlon + do i=1,grd_ens%nlat + g_rh(i,j,k) = g_q(i,j,k) + end do + end do + end do + end if + end if ! mype + + return + end subroutine general_read_fv3_regional_parallel_over_ens + + + subroutine parallel_read_fv3_step2(this,mype,iope, & + g_ps,g_u,g_v,g_tv,g_rh,g_ql,g_oz,g_w,g_qr,g_qs,g_qi,& + g_qg,g_dbz, & + gg_ps,gg_tv,gg_u,gg_v,gg_rh,gg_w,gg_dbz,gg_qr,& + gg_qs,gg_qi,gg_qg,gg_ql) + + !$$$ subprogram documentation block + ! . + ! subprogram: parallel_read_fv3_step2 distribute all fields into all tasks + ! prgmmr: Y. Wang and X. Wang org: OU/MAP date: 2022-04-01 + ! + ! abstract: All fields have been read in by general_read_fv3_regional_parallel_over_ens. + ! Different tasks contain the data from different members. + ! This program will divided the full-domain fields into subdomains + ! and assign them to all tasks. poc: xuguang.wang@ou.edu + ! + ! program history log: + ! + ! 2022-04-01 Y. Wang and X. Wang - Changed from the code for WRF-ARW + ! + ! input argument list: + ! + ! output argument list: + ! + ! attributes: + ! language: f90 + ! machine: ibm RS/6000 SP + ! 
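In general_read_fv3_regional_parallel_over_ens above, the reader rank rebuilds interface pressures by accumulating delp downward from the model-top value (eta1_ll at nsig+1), takes the lowest interface as the surface pressure, and then derives tv from tsen and q and rh from q (via genqsat, unless q_hyb_ens). A one-column sketch of the pressure step; the numbers are illustrative and the 0.001 factor mirrors the Pa-to-kPa (cb) conversion in the code:

! One-column sketch of the pressure reconstruction above (illustrative values only).
program demo_prsi
  implicit none
  integer, parameter :: nsig = 3
  real :: delp(nsig), prsi(nsig+1), ptop
  integer :: i
  ptop = 0.02                          ! illustrative model-top pressure (kPa), cf. eta1_ll(nsig+1)
  delp = [40000.0, 35000.0, 25000.0]   ! illustrative layer pressure thicknesses (Pa)
  prsi(nsig+1) = ptop
  do i = nsig, 1, -1
     prsi(i) = delp(i)*0.001 + prsi(i+1)   ! accumulate from the top down, as in the code
  end do
  print '(a,f8.2,a)', 'surface pressure = ', prsi(1), ' kPa'
end program demo_prsi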
+ !$$$ end documentation block + + use hybrid_ensemble_parameters, only: grd_ens + use mpimod, only: mpi_comm_world,ierror,mpi_rtype + use kinds, only: r_kind,r_single,i_kind + use gridmod,only: itotsub + + implicit none + + ! + ! Declare passed variables + class(get_fv3_regional_ensperts_class), intent(inout) :: this + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),intent(out):: & + g_u,g_v,g_tv,g_rh,g_ql,g_oz + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig),intent(out),optional::& + g_w,g_qr,g_qs,g_qi,g_qg,g_dbz + integer(i_kind), intent(in) :: mype, iope + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2),intent(out):: g_ps + + ! The gg_ arrays are only sent by the rank doing I/O (mype==iope) + real(r_kind),optional,dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig) :: & + gg_u,gg_v,gg_tv,gg_rh + + real(r_kind),optional,dimension(grd_ens%nlat,grd_ens%nlon,grd_ens%nsig) :: & + gg_w,gg_dbz,gg_qr,gg_qs,gg_qi,gg_qg,gg_ql + real(r_kind),optional,dimension(grd_ens%nlat,grd_ens%nlon):: gg_ps + + ! Declare local variables + real(r_kind),allocatable,dimension(:):: wrk_send_2d + integer(i_kind) :: k + + ! transfer data from root to subdomains on each task + ! scatterv used, since full grids exist only on root task. + allocate(wrk_send_2d(grd_ens%itotsub)) + ! first PS (output from fill_regional_2d is a column vector with a halo) + if(mype==iope) call this%fill_regional_2d(gg_ps,wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_ps,grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + ! then TV,U,V,RH + do k=1,grd_ens%nsig + if (mype==iope) then + call this%fill_regional_2d(gg_tv(:,:,k),wrk_send_2d) + endif + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_tv(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_u(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_u(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_v(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_v(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_rh(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_rh(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if( present(g_dbz) )then + if (mype==iope) call this%fill_regional_2d(gg_w(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_w(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_dbz(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype,& + g_dbz(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_qr(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype,& + g_qr(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_qs(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype,& + g_qs(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_qi(1,1,k),wrk_send_2d) + call 
mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype,& + g_qi(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_qg(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, & + g_qg(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + if (mype==iope) call this%fill_regional_2d(gg_ql(1,1,k),wrk_send_2d) + call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype,& + g_ql(1,1,k),grd_ens%ijn_s(mype+1),mpi_rtype,iope,mpi_comm_world,ierror) + end if + enddo + deallocate(wrk_send_2d) + end subroutine parallel_read_fv3_step2 + + subroutine fill_regional_2d(fld_in,fld_out) + !$$$ subprogram documentation block + ! . . . . + ! subprogram: fill_regional_2d + ! prgmmr: mizzi org: ncar/mmm date: 2010-08-11 + ! + ! abstract: create a column vector for the subdomain (including halo) + ! from global 2d grid. + ! + ! + ! program history log: + ! 2010-08-11 parrish, initial documentation + ! 2012-03-12 whitaker, remove nx,ny,itotsub from argument list. + ! + ! input argument list: + ! + ! output argument list: + ! + ! attributes: + ! language: f90 + ! machine: ibm RS/6000 SP + ! + !$$$ end documentation block + use kinds, only: r_kind,i_kind + use hybrid_ensemble_parameters, only: grd_ens + implicit none + real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon)::fld_in + real(r_kind),dimension(grd_ens%itotsub)::fld_out + integer(i_kind):: i,j,k + do k=1,grd_ens%itotsub + i=grd_ens%ltosj_s(k) + j=grd_ens%ltosi_s(k) + fld_out(k)=fld_in(j,i) + enddo + return + end subroutine fill_regional_2d + subroutine ens_spread_dualres_regional_fv3_regional(this,mype,en_perts,nelen) !$$$ subprogram documentation block ! . . . . diff --git a/src/gsi/general_specmod.f90 b/src/gsi/general_specmod.f90 index 439e26e431..20feae98de 100644 --- a/src/gsi/general_specmod.f90 +++ b/src/gsi/general_specmod.f90 @@ -64,6 +64,7 @@ module general_specmod ! set subroutines to public public :: general_init_spec_vars public :: general_destroy_spec_vars + public :: general_spec_multwgt ! set passed variables to public public :: spec_vars public :: spec_cut @@ -306,6 +307,36 @@ subroutine general_init_spec_vars(sp,jcap,jcap_test,nlat_a,nlon_a,eqspace) return end subroutine general_init_spec_vars + subroutine general_spec_multwgt(sp,spcwrk,spcwgt) +! 2017-03-30 J. Kay, X. Wang - add general_spec_multwgt for scale-dependent +! weighting of mixed resolution ensemble, +! POC: xuguang.wang@ou.edu +! + implicit none + type(spec_vars),intent(in) :: sp + real(r_kind),dimension(sp%nc),intent(inout) :: spcwrk + real(r_kind),dimension(0:sp%jcap),intent(in) :: spcwgt + + integer(i_kind) ii1,l,m,jmax,ks,n + +!! Code borrowed from spvar in splib + jmax=sp%jcap + + do n=0,jmax + ks=2*n + spcwrk(ks+1)=spcwrk(ks+1)*spcwgt(n) + end do + do n=0,jmax + do l=MAX(1,n-jmax),MIN(n,jmax) + ks=l*(2*jmax+(-1)*(l-1))+2*n + spcwrk(ks+1) = spcwrk(ks+1)*spcwgt(n) + spcwrk(ks+2) = spcwrk(ks+2)*spcwgt(n) + end do + end do + + return + end subroutine general_spec_multwgt + subroutine general_destroy_spec_vars(sp) !$$$ subprogram documentation block ! . . . . 
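parallel_read_fv3_step2 and fill_regional_2d above are a standard scatter: for each level the reader rank packs the full 2-D field into a send vector ordered by destination subdomain (via ltosi_s/ltosj_s), and mpi_scatterv with the ijn_s counts and displs_s offsets hands every rank its own piece, halo points included. A self-contained toy version of the pattern; the even split below stands in for GSI's ijn_s/displs_s bookkeeping:

! Toy scatter: root packs a full field in destination order, MPI_Scatterv delivers the pieces.
program demo_scatterv
  use mpi
  implicit none
  integer :: ierr, rank, nproc, i, nloc
  integer, allocatable :: counts(:), displs(:)
  real, allocatable :: full(:), sub(:)
  integer, parameter :: nfull = 12

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nproc, ierr)

  allocate(counts(nproc), displs(nproc))
  counts = nfull/nproc                       ! toy: equal pieces (assumes nproc divides nfull)
  displs = [(sum(counts(1:i-1)), i=1,nproc)] ! 0, c, 2c, ...
  nloc = counts(rank+1)
  allocate(sub(nloc))

  if (rank == 0) then
     allocate(full(nfull))
     full = [(real(i), i=1,nfull)]           ! the "gg_" full-domain field, packed in send order
  else
     allocate(full(1))                       ! dummy send buffer on non-root ranks
  end if

  call MPI_Scatterv(full, counts, displs, MPI_REAL, &
                    sub, nloc, MPI_REAL, 0, MPI_COMM_WORLD, ierr)

  print *, 'rank ', rank, ' received:', sub
  call MPI_Finalize(ierr)
end program demo_scatterv

Run under e.g. mpirun -np 4; each rank prints the slice it received from rank 0.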
diff --git a/src/gsi/get_gefs_ensperts_dualres.f90 b/src/gsi/get_gefs_ensperts_dualres.f90 index fa3d0ecbdd..1cb7586a89 100644 --- a/src/gsi/get_gefs_ensperts_dualres.f90 +++ b/src/gsi/get_gefs_ensperts_dualres.f90 @@ -68,6 +68,7 @@ subroutine get_gefs_ensperts_dualres use gsi_enscouplermod, only: gsi_enscoupler_create_sub2grid_info use gsi_enscouplermod, only: gsi_enscoupler_destroy_sub2grid_info use general_sub2grid_mod, only: sub2grid_info,general_sub2grid_create_info,general_sub2grid_destroy_info + use hybrid_ensemble_parameters, only: nsclgrp,sp_ens,spc_multwgt,global_spectral_filter_sd implicit none real(r_kind),pointer,dimension(:,:) :: ps @@ -78,6 +79,8 @@ subroutine get_gefs_ensperts_dualres real(r_kind),pointer,dimension(:,:):: x2 type(gsi_bundle),allocatable,dimension(:) :: en_read type(gsi_bundle):: en_bar + type(gsi_bundle) :: en_pertstmp1 + type(gsi_bundle) :: en_pertstmp2 ! type(gsi_grid) :: grid_ens real(r_kind) bar_norm,sig_norm,kapr,kap1 ! real(r_kind),allocatable,dimension(:,:):: z,sst2 @@ -91,6 +94,7 @@ subroutine get_gefs_ensperts_dualres ! integer(i_kind) il,jl logical ice,hydrometeor type(sub2grid_info) :: grd_tmp + integer(i_kind) :: ig0,ig ! Create perturbations grid and get variable names from perturbations if(en_perts(1,1,1)%grid%im/=grd_ens%lat2.or. & @@ -130,6 +134,16 @@ subroutine get_gefs_ensperts_dualres if ( istatus /= 0 ) & call die('get_gefs_ensperts_dualres',': trouble creating en_bar bundle, istatus =',istatus) +! Allocate bundle used for temporary usage + if( nsclgrp > 1 .and. global_spectral_filter_sd )then + call gsi_bundlecreate(en_pertstmp1,en_perts(1,1,1)%grid,'aux-ens-read',istatus,names2d=cvars2d,names3d=cvars3d) + call gsi_bundlecreate(en_pertstmp2,en_perts(1,1,1)%grid,'aux-ens-read',istatus,names2d=cvars2d,names3d=cvars3d) + if(istatus/=0) then + write(6,*)' get_gefs_ensperts_dualres: trouble creating en_read like tempbundle' + call stop2(999) + endif + end if + ! Allocate bundle used for reading members allocate(en_read(n_ens)) do n=1,n_ens @@ -318,12 +332,30 @@ subroutine get_gefs_ensperts_dualres end do end do ntlevs_ens_loop !end do over bins + if(nsclgrp > 1 .and. global_spectral_filter_sd) then + do m=1,ntlevs_ens + do n=1,n_ens + en_pertstmp1%values=en_perts(n,1,m)%valuesr4 + do ig=1,nsclgrp + call apply_scaledepwgts(grd_ens,sp_ens,en_pertstmp1,spc_multwgt(:,ig),en_pertstmp2) + en_perts(n,ig,m)%valuesr4=en_pertstmp2%values + enddo + enddo + enddo + endif + do n=n_ens,1,-1 call gsi_bundledestroy(en_read(n),istatus) if ( istatus /= 0 ) & call die('get_gefs_ensperts_dualres',': trouble destroying en_read bundle, istatus = ', istatus) end do deallocate(en_read) + if(nsclgrp > 1 .and. global_spectral_filter_sd) then + call gsi_bundledestroy(en_pertstmp1,istatus) + call gsi_bundledestroy(en_pertstmp2,istatus) + if ( istatus /= 0 ) & + call die('get_gefs_ensperts_dualres',': trouble destroying en_pertstmp2 bundle, istatus = ', istatus) + end if call gsi_enscoupler_destroy_sub2grid_info(grd_tmp) ! 
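The nsclgrp/global_spectral_filter_sd branch above pushes every global ensemble perturbation through apply_scaledepwgts once per scale group, so en_perts(n,ig,m) ends up holding a band-filtered copy of member n for group ig. A toy check of the identity that makes this a decomposition rather than a loss of information; the weights below are made up, not the ones produced by init_mult_spc_wgts:

! Toy check: if per-wavenumber group weights sum to one, the filtered copies add back to the original.
program demo_scalegroups
  implicit none
  integer, parameter :: nwav = 4, ngrp = 3
  real :: spec(nwav), w(nwav, ngrp), filtered(nwav, ngrp), recon(nwav)
  integer :: ig
  spec = [5.0, 3.0, 2.0, 1.0]          ! toy spectral coefficients, one per wavenumber
  w(:,1) = [0.9, 0.5, 0.1, 0.0]        ! "large scale" weights (toy values)
  w(:,2) = [0.1, 0.4, 0.5, 0.3]        ! "medium scale"
  w(:,3) = 1.0 - w(:,1) - w(:,2)       ! remainder, so the three groups sum to one
  do ig = 1, ngrp
     filtered(:,ig) = spec*w(:,ig)     ! what apply_scaledepwgts does in spectral space
  end do
  recon = sum(filtered, dim=2)
  print '(a,4f6.2)', 'original     :', spec
  print '(a,4f6.2)', 'sum of groups:', recon
end program demo_scalegroups

Because the per-wavenumber weights sum to one across groups (the plain-sum case, i_ensloccov4scl /= 1), the filtered copies add back to the original spectrum.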
mm1=mype+1 diff --git a/src/gsi/gsi_files.cmake b/src/gsi/gsi_files.cmake index 461b49ddf6..1658c83bf4 100644 --- a/src/gsi/gsi_files.cmake +++ b/src/gsi/gsi_files.cmake @@ -87,6 +87,7 @@ anisofilter_glb.f90 antcorr_application.f90 antest_maps0.f90 antest_maps0_glb.f90 +apply_scaledepwgts.f90 atms_spatial_average_mod.f90 balmod.f90 berror.f90 diff --git a/src/gsi/gsi_rfv3io_mod.f90 b/src/gsi/gsi_rfv3io_mod.f90 index eb7a86160f..2edde4723f 100644 --- a/src/gsi/gsi_rfv3io_mod.f90 +++ b/src/gsi/gsi_rfv3io_mod.f90 @@ -68,6 +68,7 @@ module gsi_rfv3io_mod character(len=:),allocatable :: ak_bk !='fv3_akbk' character(len=:),allocatable :: dynvars !='fv3_dynvars' character(len=:),allocatable :: tracers !='fv3_tracer' + character(len=:),allocatable :: phyvars !='fv3_phyvars' character(len=:),allocatable :: sfcdata !='fv3_sfcdata' character(len=:),allocatable :: couplerres!='coupler.res' contains @@ -91,8 +92,9 @@ module gsi_rfv3io_mod type(sub2grid_info) :: grd_fv3lam_tracer_ionouv type(sub2grid_info) :: grd_fv3lam_tracerchem_ionouv type(sub2grid_info) :: grd_fv3lam_tracersmoke_ionouv + type(sub2grid_info) :: grd_fv3lam_phyvar_ionouv type(sub2grid_info) :: grd_fv3lam_uv - integer(i_kind) ,parameter:: ndynvarslist=13, ntracerslist=8 + integer(i_kind) ,parameter:: ndynvarslist=13, ntracerslist=8, nphyvarslist=1 character(len=max_varname_length), dimension(ndynvarslist), parameter :: & vardynvars = [character(len=max_varname_length) :: & @@ -100,12 +102,14 @@ module gsi_rfv3io_mod character(len=max_varname_length), dimension(ntracerslist+naero_cmaq_fv3+7+naero_smoke_fv3), parameter :: & vartracers = [character(len=max_varname_length) :: & 'q','oz','ql','qi','qr','qs','qg','qnr',aeronames_cmaq_fv3,'pm25at','pm25ac','pm25co','pm2_5','amassi','amassj','amassk',aeronames_smoke_fv3] - character(len=max_varname_length), dimension(15+naero_cmaq_fv3+7+naero_smoke_fv3), parameter :: & + character(len=max_varname_length), dimension(nphyvarslist), parameter :: & + varphyvars = [character(len=max_varname_length) :: 'dbz'] + character(len=max_varname_length), dimension(16+naero_cmaq_fv3+7+naero_smoke_fv3), parameter :: & varfv3name = [character(len=max_varname_length) :: & - 'u','v','W','T','delp','sphum','o3mr','liq_wat','ice_wat','rainwat','snowwat','graupel','rain_nc','ps','DZ', & + 'u','v','W','T','delp','sphum','o3mr','liq_wat','ice_wat','rainwat','snowwat','graupel','rain_nc','ref_f3d','ps','DZ', & aeronames_cmaq_fv3,'pm25at','pm25ac','pm25co','pm2_5','amassi','amassj','amassk',aeronames_smoke_fv3], & vgsiname = [character(len=max_varname_length) :: & - 'u','v','w','tsen','delp','q','oz','ql','qi','qr','qs','qg','qnr','ps','delzinc', & + 'u','v','w','tsen','delp','q','oz','ql','qi','qr','qs','qg','qnr','dbz','ps','delzinc', & aeronames_cmaq_fv3,'pm25at','pm25ac','pm25co','pm2_5','amassi','amassj','amassk',aeronames_smoke_fv3] character(len=max_varname_length),dimension(:),allocatable:: name_metvars2d character(len=max_varname_length),dimension(:),allocatable:: name_metvars3d @@ -119,6 +123,8 @@ module gsi_rfv3io_mod public :: gsi_fv3ncdf_read_v1 public :: gsi_fv3ncdf_readuv public :: gsi_fv3ncdf_readuv_v1 + public :: gsi_fv3ncdf_read_ens_parallel_over_ens + public :: gsi_fv3ncdf_readuv_ens_parallel_over_ens public :: read_fv3_files public :: read_fv3_netcdf_guess public :: wrfv3_netcdf @@ -131,6 +137,7 @@ module gsi_rfv3io_mod public :: ijns,ijns2d,displss,displss2d,ijnz,displsz_g public :: fv3lam_io_dynmetvars3d_nouv,fv3lam_io_tracermetvars3d_nouv public :: 
fv3lam_io_tracerchemvars3d_nouv,fv3lam_io_tracersmokevars3d_nouv + public :: fv3lam_io_phymetvars3d_nouv public :: fv3lam_io_dynmetvars2d_nouv,fv3lam_io_tracermetvars2d_nouv integer(i_kind) mype_u,mype_v,mype_t,mype_q,mype_p,mype_delz,mype_oz,mype_ql @@ -158,6 +165,7 @@ module gsi_rfv3io_mod ! copy of cvars3d excluding uv 3-d fields character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_io_tracermetvars3d_nouv ! copy of cvars3d excluding uv 3-d fields + character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_io_phymetvars3d_nouv character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_io_tracerchemvars3d_nouv character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_io_tracersmokevars3d_nouv ! copy of cvars3d excluding uv 3-d fields @@ -169,8 +177,10 @@ module gsi_rfv3io_mod !to define names in gsibundle character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_names_gsibundle_tracer_nouv !to define names in gsibundle + character(len=max_varname_length),allocatable,dimension(:) :: fv3lam_names_gsibundle_phyvar_nouv type(gsi_bundle):: gsibundle_fv3lam_dynvar_nouv type(gsi_bundle):: gsibundle_fv3lam_tracer_nouv + type(gsi_bundle):: gsibundle_fv3lam_phyvar_nouv type(gsi_bundle):: gsibundle_fv3lam_tracerchem_nouv type(gsi_bundle):: gsibundle_fv3lam_tracersmoke_nouv @@ -204,6 +214,12 @@ subroutine fv3regfilename_init(this,it) write(filename,"(A11,I2.2)") 'fv3_tracer_',ifilesig(it) this%tracers=trim(filename) endif + if (it == ntguessig) then + this%phyvars='fv3_phyvars' + else + write(filename,"(A12,I2.2)") 'fv3_phyvars_',ifilesig(it) + this%phyvars=trim(filename) + endif if (it == ntguessig) then this%sfcdata='fv3_sfcdata' else @@ -740,6 +756,9 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) ! ! abstract: read guess for FV3 regional model ! program history log: +! 2022-04-01 Y. Wang and X. Wang - add capability to read reflectivity +! for direct radar EnVar DA using reflectivity as state +! variable, poc: xuguang.wang@ou.edu ! attributes: ! language: f90 ! 
machine: ibm RS/6000 SP @@ -768,6 +787,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) use gsi_metguess_mod, only: gsi_metguess_get use netcdf, only:nf90_open,nf90_close,nf90_inquire,nf90_nowrite, nf90_format_netcdf4 use gsi_chemguess_mod, only: gsi_chemguess_get + use obsmod, only: if_model_dbz implicit none @@ -797,7 +817,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) real(r_kind),dimension(:,:,:),pointer::ges_qg=>NULL() real(r_kind),dimension(:,:,:),pointer::ges_qnr=>NULL() real(r_kind),dimension(:,:,:),pointer::ges_w=>NULL() - + real(r_kind),dimension(:,:,:),pointer::ges_dbz=>NULL() real(r_kind),dimension(:,:,:),pointer::ges_aalj=>NULL() real(r_kind),dimension(:,:,:),pointer::ges_acaj=>NULL() @@ -890,8 +910,8 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) integer(i_kind),dimension(:,:),allocatable:: lnames integer(i_kind),dimension(:,:),allocatable:: uvlnames integer(i_kind):: inner_vars,numfields - integer(i_kind):: ndynvario2d,ntracerio2d,ilev,jdynvar,jtracer - integer(i_kind):: iuv,ndynvario3d,ntracerio3d + integer(i_kind):: ndynvario2d,ntracerio2d,ilev,jdynvar,jtracer,jphyvar + integer(i_kind):: iuv,ndynvario3d,ntracerio3d,nphyvario3d integer(i_kind):: ntracerchemio3d,ntracersmokeio3d integer(i_kind):: loc_id,ncfmt @@ -957,6 +977,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) iuv=0 ndynvario3d=0 ntracerio3d=0 + nphyvario3d=0 do i=1,size(name_metvars3d) vartem=trim(name_metvars3d(i)) if(trim(vartem)=='u'.or.trim(vartem)=='v') then @@ -967,6 +988,8 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) ndynvario3d=ndynvario3d+1 else if (ifindstrloc(vartracers,trim(vartem))> 0) then ntracerio3d=ntracerio3d+1 + else if (ifindstrloc(varphyvars,trim(vartem))> 0) then + nphyvario3d=nphyvario3d+1 else write(6,*)'the metvarname1 ',trim(vartem),' has not been considered yet, stop' call stop2(333) @@ -978,6 +1001,12 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) write(6,*)"the set up for met variable is not as expected, abort" call stop2(222) endif + if ( if_model_dbz ) then + if( nphyvario3d<=0 ) then + write(6,*)"the set up for met variable (phyvar) is not as expected, abort" + call stop2(223) + end if + endif if (fv3sar_bg_opt == 0.and.ifindstrloc(name_metvars3d,'delp') <= 0)then ndynvario3d=ndynvario3d+1 ! for delp endif @@ -987,6 +1016,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) if (l_reg_update_hydro_delz.and.fv3sar_bg_opt==0) ndynvario3d=ndynvario3d+1 ! 
for delzinc allocate(fv3lam_io_dynmetvars3d_nouv(ndynvario3d)) allocate(fv3lam_io_tracermetvars3d_nouv(ntracerio3d)) + allocate(fv3lam_io_phymetvars3d_nouv(nphyvario3d)) if (laeroana_fv3cmaq) then allocate(fv3lam_io_tracerchemvars3d_nouv(naero_cmaq_fv3+7)) @@ -998,6 +1028,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) jdynvar=0 jtracer=0 + jphyvar=0 do i=1,size(name_metvars3d) vartem=trim(name_metvars3d(i)) if(.not.(trim(vartem)=='u'.or.trim(vartem)=='v'.or.trim(vartem)=='iqr')) then @@ -1012,6 +1043,9 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) else if (ifindstrloc(vartracers,trim(vartem)) > 0) then jtracer=jtracer+1 fv3lam_io_tracermetvars3d_nouv(jtracer)=trim(vartem) + else if (ifindstrloc(varphyvars,trim(vartem)) > 0) then + jphyvar=jphyvar+1 + fv3lam_io_phymetvars3d_nouv(jphyvar)=trim(vartem) else write(6,*)'the metvarname ',vartem,' is not expected, stop' call flush(6) @@ -1027,7 +1061,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) jdynvar=jdynvar+1 fv3lam_io_dynmetvars3d_nouv(jdynvar)="delzinc" endif - if(jdynvar /= ndynvario3d.or.jtracer /= ntracerio3d ) then + if(jdynvar /= ndynvario3d.or.jtracer /= ntracerio3d.or.jphyvar /= nphyvario3d ) then write(6,*)'ndynvario3d is not as expected, stop' call flush(6) call stop2(333) @@ -1035,6 +1069,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) if(mype == 0) then write(6,*) ' fv3lam_io_dynmetvars3d_nouv is ',(trim(fv3lam_io_dynmetvars3d_nouv(i)),i=1,ndynvario3d) write(6,*) ' fv3lam_io_tracermevars3d_nouv is ',(trim(fv3lam_io_tracermetvars3d_nouv(i)),i=1,ntracerio3d) + write(6,*) ' fv3lam_io_phymetvars3d_nouv is ',(trim(fv3lam_io_phymetvars3d_nouv(i)),i=1,nphyvario3d) endif ndynvario2d=0 @@ -1047,7 +1082,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) else if (ifindstrloc(vartracers,trim(vartem)) > 0) then ntracerio2d=ntracerio2d+1 else if(trim(vartem)=='z') then - write(6,*)'the metvarname ',trim(vartem),' will be dealt separately' + if(mype == 0) write(6,*)'the metvarname ',trim(vartem),' will be dealt separately' else if(trim(vartem)=='t2m') then else if(trim(vartem)=='q2m') then else @@ -1165,6 +1200,11 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) ntracerio2d=0 endif + if( if_model_dbz )then + call gsi_bundlecreate(gsibundle_fv3lam_phyvar_nouv,GSI_MetGuess_Bundle(it)%grid,'gsibundle_fv3lam_phyvar_nouv',istatus, & + names3d=fv3lam_io_phymetvars3d_nouv) + end if + if (laeroana_fv3cmaq) then if (allocated(fv3lam_io_tracerchemvars3d_nouv) ) then call gsi_bundlecreate(gsibundle_fv3lam_tracerchem_nouv,GSI_ChemGuess_Bundle(it)%grid,'gsibundle_fv3lam_tracerchem_nouv',istatus, & @@ -1254,6 +1294,22 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) endif + if ( if_model_dbz )then + inner_vars=1 + numfields=inner_vars*(nphyvario3d*grd_a%nsig) + deallocate(lnames,names) + allocate(lnames(1,numfields),names(1,numfields)) + ilev=1 + do i=1,nphyvario3d + do k=1,grd_a%nsig + lnames(1,ilev)=k + names(1,ilev)=trim(fv3lam_io_phymetvars3d_nouv(i)) + ilev=ilev+1 + enddo + enddo + call general_sub2grid_create_info(grd_fv3lam_phyvar_ionouv,inner_vars,grd_a%nlat,& + grd_a%nlon,grd_a%nsig,numfields,regional,names=names,lnames=lnames) + end if inner_vars=2 numfields=grd_a%nsig @@ -1279,15 +1335,19 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'tv' ,ges_tv ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'q' ,ges_q ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'oz' ,ges_oz ,istatus );ier=ier+istatus 
- if (l_use_dbz_directDA) then + if (l_use_dbz_directDA .or. if_model_dbz) then call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'ql' ,ges_ql ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qi' ,ges_qi ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qr' ,ges_qr ,istatus );ier=ier+istatus - call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'iqr' ,ges_iqr ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qs' ,ges_qs ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qg' ,ges_qg ,istatus );ier=ier+istatus - call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qnr',ges_qnr ,istatus );ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'w' , ges_w ,istatus );ier=ier+istatus + if (l_use_dbz_directDA) then + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'iqr' ,ges_iqr ,istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qnr',ges_qnr ,istatus );ier=ier+istatus + end if + if(if_model_dbz) & + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'dbz' , ges_dbz ,istatus );ier=ier+istatus end if if (ier/=0) call die(trim(myname),'cannot get pointers for fv3 met-fields, ier =',ier) @@ -1343,6 +1403,10 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) & ,fv3filenamegin(it)%dynvars,fv3filenamegin(it)) call gsi_fv3ncdf_read(grd_fv3lam_tracer_ionouv,gsibundle_fv3lam_tracer_nouv & & ,fv3filenamegin(it)%tracers,fv3filenamegin(it)) + if( if_model_dbz )then + call gsi_fv3ncdf_read(grd_fv3lam_phyvar_ionouv,gsibundle_fv3lam_phyvar_nouv & + & ,fv3filenamegin(it)%phyvars,fv3filenamegin(it)) + end if if (laeroana_fv3cmaq) then call gsi_fv3ncdf_read(grd_fv3lam_tracerchem_ionouv,gsibundle_fv3lam_tracerchem_nouv & & ,fv3filenamegin(it)%tracers,fv3filenamegin(it)) @@ -1439,6 +1503,7 @@ subroutine read_fv3_netcdf_guess(fv3filenamegin) call gsi_copy_bundle(gsibundle_fv3lam_tracersmoke_nouv,GSI_ChemGuess_Bundle(it)) endif + if(if_model_dbz) call gsi_copy_bundle(gsibundle_fv3lam_phyvar_nouv,GSI_MetGuess_Bundle(it)) call GSI_BundleGetPointer ( gsibundle_fv3lam_dynvar_nouv, 'tsen' ,ges_tsen_readin ,istatus );ier=ier+istatus !! tsen2tv !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! do k=1,nsig @@ -2151,13 +2216,15 @@ subroutine gsi_fv3ncdf_read(grd_ionouv,cstate_nouv,filenamein,fv3filenamegin) character(len=max_varname_length) :: varname,vgsiname character(len=max_varname_length) :: name character(len=max_varname_length) :: filenamein2 - + real(r_kind),allocatable,dimension(:,:):: uu2d_tmp + integer(i_kind) :: countloc_tmp(3),startloc_tmp(3) integer(i_kind) nlatcase,nloncase,nxcase,nycase,countloc(3),startloc(3) integer(i_kind) ilev,ilevtot,inative - integer(i_kind) kbgn,kend + integer(i_kind) kbgn,kend,len + logical :: phy_smaller_domain integer(i_kind) gfile_loc,iret,var_id - integer(i_kind) nz,nzp1,mm1 + integer(i_kind) nz,nzp1,mm1,nx_phy ! for io_layout > 1 real(r_kind),allocatable,dimension(:,:):: uu2d_layout integer(i_kind) :: nio @@ -2212,6 +2279,22 @@ subroutine gsi_fv3ncdf_read(grd_ionouv,cstate_nouv,filenamein,fv3filenamegin) inative=nzp1-ilev startloc=(/1,1,inative/) countloc=(/nxcase,nycase,1/) + ! Variable ref_f3d in phy_data.nc has a smaller domain size than + ! 
dynvariables and tracers as well as a reversed order in vertical + if ( trim(adjustl(varname)) == 'ref_f3d' )then + iret=nf90_inquire_dimension(gfile_loc,1,name,len) + if(trim(name)=='xaxis_1') nx_phy=len + if( nx_phy == nxcase )then + allocate(uu2d_tmp(nxcase,nycase)) + countloc_tmp=(/nxcase,nycase,1/) + phy_smaller_domain = .false. + else + allocate(uu2d_tmp(nxcase-6,nycase-6)) + countloc_tmp=(/nxcase-6,nycase-6,1/) + phy_smaller_domain = .true. + end if + startloc_tmp=(/1,1,ilev/) + end if if(fv3_io_layout_y > 1) then do nio=0,fv3_io_layout_y-1 @@ -2224,7 +2307,22 @@ subroutine gsi_fv3ncdf_read(grd_ionouv,cstate_nouv,filenamein,fv3filenamegin) enddo else iret=nf90_inq_varid(gfile_loc,trim(adjustl(varname)),var_id) - iret=nf90_get_var(gfile_loc,var_id,uu2d,start=startloc,count=countloc) + if ( trim(adjustl(varname)) == 'ref_f3d' )then + uu2d = 0.0_r_kind + iret=nf90_get_var(gfile_loc,var_id,uu2d_tmp,start=startloc_tmp,count=countloc_tmp) + where(uu2d_tmp < 0.0_r_kind) + uu2d_tmp = 0.0_r_kind + endwhere + + if( phy_smaller_domain )then + uu2d(4:nxcase-3,4:nycase-3) = uu2d_tmp + else + uu2d(1:nxcase,1:nycase) = uu2d_tmp + end if + deallocate(uu2d_tmp) + else + iret=nf90_get_var(gfile_loc,var_id,uu2d,start=startloc,count=countloc) + end if endif call fv3_h_to_ll(uu2d,hwork(1,:,:,ilevtot),nxcase,nycase,nloncase,nlatcase,grid_reverse_flag) @@ -2642,6 +2740,395 @@ subroutine gsi_fv3ncdf_readuv_v1(grd_uv,ges_u,ges_v,fv3filenamegin) end subroutine gsi_fv3ncdf_readuv_v1 +subroutine gsi_fv3ncdf_read_ens_parallel_over_ens(filenamein,fv3filenamegin, & + delp,tsen,w,q,oz,ql,qr,qs,qi,qg,dbz,iope) +!$$$ subprogram documentation block +! . . . . +! subprogram: gsi_fv3ncdf_read_ens_parallel_over_ens +! program history log: +! 2022-04-01 Y. Wang and X. Wang, changed from gsi_fv3ncdf_read_ens +! for FV3LAM ensemble parallel IO in hybrid EnVar +! poc: xuguang.wang@ou.edu +! +! abstract: read in fields excluding u and v +! program history log: +! +! input argument list: +! filenamein - file name to read from +! iope - pe to read in the field +! +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! 
+!$$$ end documentation block + + + use kinds, only: r_kind,i_kind + use mpimod, only: mpi_comm_world,mpi_rtype,mype + use mpimod, only: MPI_INFO_NULL + use netcdf, only: nf90_open,nf90_close,nf90_get_var,nf90_noerr + use netcdf, only: nf90_nowrite,nf90_inquire,nf90_inquire_dimension + use netcdf, only: nf90_inquire_variable + use netcdf, only: nf90_inq_varid + use gridmod, only: nsig,nlon,nlat + use mod_fv3_lola, only: fv3_h_to_ll + use gsi_bundlemod, only: gsi_bundle + use general_sub2grid_mod, only: sub2grid_info,general_grid2sub + + implicit none + character(*),intent(in):: filenamein + type (type_fv3regfilenameg),intent(in) ::fv3filenamegin + integer(i_kind) ,intent(in ) :: iope + real(r_kind),allocatable,dimension(:,:):: uu2d, uu2d_tmp + real(r_kind),allocatable,dimension(:):: wrk_send_2d + real(r_kind),dimension(nlat,nlon,nsig):: hwork + real(r_kind),dimension(nlat,nlon,nsig),intent(out),optional:: delp,tsen,w,q,oz,ql,qr,qs,qi,qg,dbz + character(len=max_varname_length) :: varname,vgsiname + character(len=max_varname_length) :: name + character(len=max_varname_length), allocatable,dimension(:) :: varname_files + + integer(i_kind) nlatcase,nloncase,nxcase,nycase,countloc(3),startloc(3),countloc_tmp(3),startloc_tmp(3) + integer(i_kind) ilev,ilevtot,inative,ivar + integer(i_kind) kbgn,kend + integer(i_kind) gfile_loc,iret,var_id + integer(i_kind) nz,nzp1,mm1,len,nx_phy + logical :: phy_smaller_domain +! for io_layout > 1 + real(r_kind),allocatable,dimension(:,:):: uu2d_layout + integer(i_kind) :: nio + integer(i_kind),allocatable :: gfile_loc_layout(:) + character(len=180) :: filename_layout + + mm1=mype+1 + nloncase=nlon + nlatcase=nlat + nxcase=nx + nycase=ny + kbgn=1 + kend=nsig + + if( mype == iope )then + allocate(uu2d(nxcase,nycase)) + if( present(delp).or.present(tsen).or.present(w) )then ! dynvars + if( present(w) )then + allocate(varname_files(3)) + varname_files = (/'T ','delp','W '/) + else + allocate(varname_files(2)) + varname_files = (/'T ','delp'/) + end if + end if + if( present(q).or.present(ql).or.present(qr) )then ! tracers + if(present(qr))then + allocate(varname_files(7)) + varname_files = (/'sphum ','o3mr ','liq_wat','ice_wat','rainwat','snowwat','graupel'/) + else + allocate(varname_files(2)) + varname_files = (/'sphum',' o3mr'/) + end if + end if + if( present(dbz) )then ! phyvars: dbz + allocate(varname_files(1)) + varname_files = (/'ref_f3d'/) + end if + + if(fv3_io_layout_y > 1) then + allocate(gfile_loc_layout(0:fv3_io_layout_y-1)) + do nio=0,fv3_io_layout_y-1 + write(filename_layout,'(a,a,I4.4)') trim(filenamein),'.',nio + iret=nf90_open(filename_layout,nf90_nowrite,gfile_loc_layout(nio),comm=mpi_comm_world,info=MPI_INFO_NULL) + if(iret/=nf90_noerr) then + write(6,*)' gsi_fv3ncdf_read: problem opening ',trim(filename_layout),gfile_loc_layout(nio),', Status = ',iret + call flush(6) + call stop2(333) + endif + enddo + else + iret=nf90_open(filenamein,nf90_nowrite,gfile_loc) + if(iret/=nf90_noerr) then + write(6,*)' gsi_fv3ncdf_read: problem opening ',trim(filenamein),gfile_loc,', Status = ',iret + call flush(6) + call stop2(333) + endif + endif + do ivar = 1, size(varname_files) + do ilevtot=kbgn,kend + ilev=ilevtot + nz=nsig + nzp1=nz+1 + inative=nzp1-ilev + startloc=(/1,1,inative/) + countloc=(/nxcase,nycase,1/) + varname = trim(varname_files(ivar)) + ! Variable ref_f3d in phy_data.nc has a smaller domain size than + ! 
dynvariables and tracers as well as a reversed order in vertical + if ( trim(adjustl(varname)) == 'ref_f3d' )then + iret=nf90_inquire_dimension(gfile_loc,1,name,len) + if(trim(name)=='xaxis_1') nx_phy=len + if( nx_phy == nxcase )then + allocate(uu2d_tmp(nxcase,nycase)) + countloc_tmp=(/nxcase,nycase,1/) + phy_smaller_domain = .false. + else + allocate(uu2d_tmp(nxcase-6,nycase-6)) + countloc_tmp=(/nxcase-6,nycase-6,1/) + phy_smaller_domain = .true. + end if + startloc_tmp=(/1,1,ilev/) + end if + + if(fv3_io_layout_y > 1) then + do nio=0,fv3_io_layout_y-1 + countloc=(/nxcase,ny_layout_len(nio),1/) + allocate(uu2d_layout(nxcase,ny_layout_len(nio))) + iret=nf90_inq_varid(gfile_loc_layout(nio),trim(adjustl(varname)),var_id) + iret=nf90_get_var(gfile_loc_layout(nio),var_id,uu2d_layout,start=startloc,count=countloc) + uu2d(:,ny_layout_b(nio):ny_layout_e(nio))=uu2d_layout + deallocate(uu2d_layout) + enddo + else + iret=nf90_inq_varid(gfile_loc,trim(adjustl(varname)),var_id) + if ( trim(adjustl(varname)) == 'ref_f3d' )then + uu2d = 0.0_r_kind + iret=nf90_get_var(gfile_loc,var_id,uu2d_tmp,start=startloc_tmp,count=countloc_tmp) + where(uu2d_tmp < 0.0_r_kind) + uu2d_tmp = 0.0_r_kind + endwhere + if(phy_smaller_domain)then + uu2d(4:nxcase-3,4:nycase-3) = uu2d_tmp + else + uu2d = uu2d_tmp + end if + deallocate(uu2d_tmp) + else + iret=nf90_get_var(gfile_loc,var_id,uu2d,start=startloc,count=countloc) + end if + endif + call fv3_h_to_ll(uu2d,hwork(:,:,ilevtot),nxcase,nycase,nloncase,nlatcase,grid_reverse_flag) + enddo ! ilevtot + if( present(delp).or.present(tsen).or.present(w) )then ! dynvars + if(ivar == 1)then + tsen = hwork + else if(ivar == 2)then + delp = hwork + end if + if( present(w) .and. ivar == 3 )then + w = hwork + end if + end if + if( present(q).or.present(ql).or.present(qr) )then ! tracers + if(ivar == 1)then + q = hwork + else if(ivar == 2)then + oz = hwork + end if + if(present(qr))then + if(ivar == 3)then + ql = hwork + else if(ivar == 4)then + qi = hwork + else if(ivar == 5)then + qr = hwork + else if(ivar == 6)then + qs = hwork + else if(ivar == 7)then + qg = hwork + end if + end if + end if + if( present(dbz) )then ! phyvars: dbz + dbz = hwork + end if + + end do + + if(fv3_io_layout_y > 1) then + do nio=1,fv3_io_layout_y-1 + iret=nf90_close(gfile_loc_layout(nio)) + enddo + deallocate(gfile_loc_layout) + else + iret=nf90_close(gfile_loc) + endif + + deallocate (uu2d,varname_files) + end if + + return +end subroutine gsi_fv3ncdf_read_ens_parallel_over_ens + +subroutine gsi_fv3ncdf_readuv_ens_parallel_over_ens(ges_u,ges_v,fv3filenamegin,iope) +!$$$ subprogram documentation block +! . . . . +! subprogram: gsi_fv3ncdf_readuv_ens_parallel_over_ens +! program history log: +! 2022-04-01 Y. Wang and X. Wang, changed from gsi_fv3ncdf_readuv_ens +! for FV3LAM ensemble parallel IO in hybrid EnVar +! poc: xuguang.wang@ou.edu +! +! abstract: read in a field from a netcdf FV3 file in mype_u,mype_v +! then scatter the field to each PE +! program history log: +! +! input argument list: +! +! output argument list: +! ges_u - output sub domain u field +! ges_v - output sub domain v field +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! 
+!$$$ end documentation block + use kinds, only: r_kind,i_kind + use mpimod, only: mpi_comm_world,mpi_rtype,mype,mpi_info_null + use gridmod, only: nsig,nlon,nlat + use netcdf, only: nf90_open,nf90_close,nf90_get_var,nf90_noerr + use netcdf, only: nf90_nowrite,nf90_inquire,nf90_inquire_dimension + use netcdf, only: nf90_inquire_variable + use netcdf, only: nf90_inq_varid + use mod_fv3_lola, only: fv3_h_to_ll,fv3uv2earth + use general_sub2grid_mod, only: sub2grid_info,general_grid2sub + + implicit none + real(r_kind) ,intent(out ) :: ges_u(nlat,nlon,nsig) + real(r_kind) ,intent(out ) :: ges_v(nlat,nlon,nsig) + type (type_fv3regfilenameg),intent (in) :: fv3filenamegin + integer(i_kind), intent(in) :: iope + real(r_kind),dimension(2,nlat,nlon,nsig):: hwork + character(:), allocatable:: filenamein + real(r_kind),allocatable,dimension(:,:):: u2d,v2d + real(r_kind),allocatable,dimension(:,:):: uc2d,vc2d + character(len=max_varname_length) :: varname,vgsiname + real(r_kind),allocatable,dimension(:,:,:,:):: worksub + integer(i_kind) u_grd_VarId,v_grd_VarId + integer(i_kind) nlatcase,nloncase + integer(i_kind) nxcase,nycase + integer(i_kind) u_countloc(3),u_startloc(3),v_countloc(3),v_startloc(3) + integer(i_kind) inative,ilev,ilevtot + integer(i_kind) kbgn,kend + + integer(i_kind) gfile_loc,iret + integer(i_kind) nz,nzp1,mm1 + +! for fv3_io_layout_y > 1 + real(r_kind),allocatable,dimension(:,:):: u2d_layout,v2d_layout + integer(i_kind) :: nio + integer(i_kind),allocatable :: gfile_loc_layout(:) + character(len=180) :: filename_layout + + mm1=mype+1 + nloncase=nlon + nlatcase=nlat + nxcase=nx + nycase=ny + kbgn=1 + kend=nsig + if( mype == iope )then + allocate(u2d(nxcase,nycase+1)) + allocate(v2d(nxcase+1,nycase)) + allocate(uc2d(nxcase,nycase)) + allocate(vc2d(nxcase,nycase)) + filenamein=fv3filenamegin%dynvars + + if(fv3_io_layout_y > 1) then + allocate(gfile_loc_layout(0:fv3_io_layout_y-1)) + do nio=0,fv3_io_layout_y-1 + write(filename_layout,'(a,a,I4.4)') trim(filenamein),".",nio + iret=nf90_open(filename_layout,nf90_nowrite,gfile_loc_layout(nio),comm=mpi_comm_world,info=MPI_INFO_NULL) + if(iret/=nf90_noerr) then + write(6,*)'problem opening ',trim(filename_layout),gfile_loc_layout(nio),', Status = ',iret + call flush(6) + call stop2(333) + endif + enddo + else + iret=nf90_open(filenamein,nf90_nowrite,gfile_loc) + if(iret/=nf90_noerr) then + write(6,*)' problem opening ',trim(filenamein),', Status = ',iret + call flush(6) + call stop2(333) + endif + endif + do ilevtot=kbgn,kend + ilev=ilevtot + nz=nsig + nzp1=nz+1 + inative=nzp1-ilev + u_countloc=(/nxcase,nycase+1,1/) + v_countloc=(/nxcase+1,nycase,1/) + u_startloc=(/1,1,inative/) + v_startloc=(/1,1,inative/) + + if(fv3_io_layout_y > 1) then + do nio=0,fv3_io_layout_y-1 + u_countloc=(/nxcase,ny_layout_len(nio)+1,1/) + allocate(u2d_layout(nxcase,ny_layout_len(nio)+1)) + call check( nf90_inq_varid(gfile_loc_layout(nio),'u',u_grd_VarId) ) + iret=nf90_get_var(gfile_loc_layout(nio),u_grd_VarId,u2d_layout,start=u_startloc,count=u_countloc) + u2d(:,ny_layout_b(nio):ny_layout_e(nio))=u2d_layout(:,1:ny_layout_len(nio)) + if(nio==fv3_io_layout_y-1) u2d(:,ny_layout_e(nio)+1)=u2d_layout(:,ny_layout_len(nio)+1) + deallocate(u2d_layout) + + v_countloc=(/nxcase+1,ny_layout_len(nio),1/) + allocate(v2d_layout(nxcase+1,ny_layout_len(nio))) + call check( nf90_inq_varid(gfile_loc_layout(nio),'v',v_grd_VarId) ) + iret=nf90_get_var(gfile_loc_layout(nio),v_grd_VarId,v2d_layout,start=v_startloc,count=v_countloc) + v2d(:,ny_layout_b(nio):ny_layout_e(nio))=v2d_layout + 
deallocate(v2d_layout) + enddo + else + call check( nf90_inq_varid(gfile_loc,'u',u_grd_VarId) ) + iret=nf90_get_var(gfile_loc,u_grd_VarId,u2d,start=u_startloc,count=u_countloc) + call check( nf90_inq_varid(gfile_loc,'v',v_grd_VarId) ) + iret=nf90_get_var(gfile_loc,v_grd_VarId,v2d,start=v_startloc,count=v_countloc) + endif + + if(.not.grid_reverse_flag) then + call reverse_grid_r_uv (u2d,nxcase,nycase+1,1) + call reverse_grid_r_uv (v2d,nxcase+1,nycase,1) + endif + call fv3uv2earth(u2d(:,:),v2d(:,:),nxcase,nycase,uc2d,vc2d) + + ! NOTE on transfor to earth u/v: + ! The u and v before transferring need to be in E-W/N-S grid, which is + ! defined as reversed grid here because it is revered from map view. + ! + ! Have set the following flag for grid orientation + ! grid_reverse_flag=true: E-W/N-S grid + ! grid_reverse_flag=false: W-E/S-N grid + ! + ! So for preparing the wind transferring, need to reverse the grid + ! from + ! W-E/S-N grid to E-W/N-S grid when grid_reverse_flag=false: + ! + ! if(.not.grid_reverse_flag) call reverse_grid_r_uv + ! + ! and the last input parameter for fv3_h_to_ll is alway true: + ! + ! + call fv3_h_to_ll(uc2d,hwork(1,:,:,ilevtot),nxcase,nycase,nloncase,nlatcase,.true.) + call fv3_h_to_ll(vc2d,hwork(2,:,:,ilevtot),nxcase,nycase,nloncase,nlatcase,.true.) + enddo ! ilevtot + if(fv3_io_layout_y > 1) then + do nio=0,fv3_io_layout_y-1 + iret=nf90_close(gfile_loc_layout(nio)) + enddo + deallocate(gfile_loc_layout) + else + iret=nf90_close(gfile_loc) + endif + deallocate(u2d,v2d,uc2d,vc2d) + ges_u = hwork(1,:,:,:) + ges_v = hwork(2,:,:,:) + end if ! mype + +end subroutine gsi_fv3ncdf_readuv_ens_parallel_over_ens + + subroutine wrfv3_netcdf(fv3filenamegin) !$$$ subprogram documentation block ! . . . . @@ -2653,7 +3140,8 @@ subroutine wrfv3_netcdf(fv3filenamegin) ! program history log: ! 2019-04-18 CAPS(C. Tong) - import direct reflectivity DA capabilities ! 2019-11-22 CAPS(C. Tong) - modify "add_saved" to properly output analyses -! 2021-01-05 x.zhang/lei - add code for updating delz analysis in regional da +! 2021-01-05 x.zhang/lei - add code for updating delz analysis in regional da +! 2022-04-01 Y. Wang and X. Wang - add code for updating reflectivity ! ! input argument list: ! @@ -2680,6 +3168,7 @@ subroutine wrfv3_netcdf(fv3filenamegin) use directDA_radaruse_mod, only: l_cvpnr, cvpnr_pval use gridmod, only: eta1_ll,eta2_ll use constants, only: one + use obsmod, only: if_model_dbz implicit none @@ -2705,6 +3194,7 @@ subroutine wrfv3_netcdf(fv3filenamegin) real(r_kind),pointer,dimension(:,:,:):: ges_qg =>NULL() real(r_kind),pointer,dimension(:,:,:):: ges_qnr =>NULL() real(r_kind),pointer,dimension(:,:,:):: ges_w =>NULL() + real(r_kind),pointer,dimension(:,:,:):: ges_dbz =>NULL() real(r_kind),pointer,dimension(:,:,:):: ges_delzinc =>NULL() real(r_kind),pointer,dimension(:,:,:):: ges_delp =>NULL() real(r_kind),dimension(:,: ),allocatable:: ges_ps_write @@ -2794,14 +3284,17 @@ subroutine wrfv3_netcdf(fv3filenamegin) call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'u' , ges_u ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'v' , ges_v ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'q' ,ges_q ,istatus);ier=ier+istatus - if (l_use_dbz_directDA) then + if (l_use_dbz_directDA .or. 
if_model_dbz) then call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'ql' ,ges_ql ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qi' ,ges_qi ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qr' ,ges_qr ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qs' ,ges_qs ,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qg' ,ges_qg ,istatus);ier=ier+istatus + if (l_use_dbz_directDA) & call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qnr',ges_qnr,istatus);ier=ier+istatus call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'w' , ges_w ,istatus);ier=ier+istatus + if( if_model_dbz )& + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'dbz' , ges_dbz ,istatus);ier=ier+istatus end if if(i_use_2mq4b > 0 .and. i_use_2mt4b > 0 ) then call GSI_BundleGetPointer (GSI_MetGuess_Bundle(it),'q2m',ges_q2m,istatus); ier=ier+istatus @@ -2931,6 +3424,7 @@ subroutine wrfv3_netcdf(fv3filenamegin) call gsi_copy_bundle(GSI_MetGuess_Bundle(it),gsibundle_fv3lam_dynvar_nouv) call gsi_copy_bundle(GSI_MetGuess_Bundle(it),gsibundle_fv3lam_tracer_nouv) + if( if_model_dbz ) call gsi_copy_bundle(GSI_MetGuess_Bundle(it),gsibundle_fv3lam_phyvar_nouv) if (laeroana_fv3cmaq) then call gsi_copy_bundle(GSI_ChemGuess_Bundle(it),gsibundle_fv3lam_tracerchem_nouv) end if @@ -2979,6 +3473,10 @@ subroutine wrfv3_netcdf(fv3filenamegin) add_saved,fv3filenamegin%dynvars,fv3filenamegin) call gsi_fv3ncdf_write(grd_fv3lam_tracer_ionouv,gsibundle_fv3lam_tracer_nouv, & add_saved,fv3filenamegin%tracers,fv3filenamegin) + if( if_model_dbz ) then + call gsi_fv3ncdf_write(grd_fv3lam_phyvar_ionouv,gsibundle_fv3lam_phyvar_nouv,& + add_saved,fv3filenamegin%phyvars,fv3filenamegin) + end if call gsi_fv3ncdf_writeuv(grd_fv3lam_uv,ges_u,ges_v,add_saved,fv3filenamegin) if (laeroana_fv3cmaq) then call gsi_fv3ncdf_write(grd_fv3lam_tracerchem_ionouv,gsibundle_fv3lam_tracerchem_nouv, & @@ -3598,6 +4096,7 @@ subroutine gsi_fv3ncdf_write(grd_ionouv,cstate_nouv,add_saved,filenamein,fv3file use netcdf, only: nf90_open,nf90_close use netcdf, only: nf90_write,nf90_inq_varid use netcdf, only: nf90_put_var,nf90_get_var + use netcdf, only: nf90_inquire_dimension use gsi_bundlemod, only: gsi_bundle use general_sub2grid_mod, only: sub2grid_info,general_sub2grid implicit none @@ -3609,16 +4108,19 @@ subroutine gsi_fv3ncdf_write(grd_ionouv,cstate_nouv,add_saved,filenamein,fv3file type (type_fv3regfilenameg),intent (in) :: fv3filenamegin real(r_kind),dimension(1,grd_ionouv%nlat,grd_ionouv%nlon,grd_ionouv%kbegin_loc:grd_ionouv%kend_alloc):: hwork character(len=max_varname_length) :: filenamein2 - character(len=max_varname_length) :: varname,vgsiname + character(len=max_varname_length) :: varname,vgsiname,name integer(i_kind) nlatcase,nloncase,nxcase,nycase,countloc(3),startloc(3) + integer(i_kind) countloc_tmp(3),startloc_tmp(3) integer(i_kind) kbgn,kend integer(i_kind) inative,ilev,ilevtot integer(i_kind) :: VarId,gfile_loc - integer(i_kind) mm1,nzp1 + integer(i_kind) mm1,nzp1,len,nx_phy,iret + logical :: phy_smaller_domain real(r_kind),allocatable,dimension(:,:):: work_a real(r_kind),allocatable,dimension(:,:):: work_b real(r_kind),allocatable,dimension(:,:):: workb2,worka2 + real(r_kind),allocatable,dimension(:,:):: work_b_tmp ! 
for io_layout > 1 real(r_kind),allocatable,dimension(:,:):: work_b_layout @@ -3673,7 +4175,20 @@ subroutine gsi_fv3ncdf_write(grd_ionouv,cstate_nouv,add_saved,filenamein,fv3file work_a=hwork(1,:,:,ilevtot) - + if( trim(varname) == 'ref_f3d' )then + iret=nf90_inquire_dimension(gfile_loc,1,name,len) + if(trim(name)=='xaxis_1') nx_phy=len + if( nx_phy == nxcase )then + allocate(work_b_tmp(nxcase,nycase)) + countloc_tmp=(/nxcase,nycase,1/) + phy_smaller_domain = .false. + else + allocate(work_b_tmp(nxcase-6,nycase-6)) + countloc_tmp=(/nxcase-6,nycase-6,1/) + phy_smaller_domain = .true. + end if + startloc_tmp=(/1,1,ilev/) + end if call check( nf90_inq_varid(gfile_loc,trim(varname),VarId) ) @@ -3703,16 +4218,29 @@ subroutine gsi_fv3ncdf_write(grd_ionouv,cstate_nouv,add_saved,filenamein,fv3file deallocate(work_b_layout) enddo else - call check( nf90_get_var(gfile_loc,VarId,work_b,start = startloc, count = countloc) ) + if( trim(varname) == 'ref_f3d' )then + work_b = 0.0_r_kind + call check( nf90_get_var(gfile_loc,VarId,work_b_tmp,start = startloc_tmp, count = countloc_tmp) ) + where(work_b_tmp < 0.0_r_kind) + work_b_tmp = 0.0_r_kind + end where + if(phy_smaller_domain)then + work_b(4:nxcase-3,4:nycase-3) = work_b_tmp + else + work_b(1:nxcase,1:nycase) = work_b_tmp + end if + else + call check( nf90_get_var(gfile_loc,VarId,work_b,start = startloc, count = countloc) ) + end if endif call fv3_h_to_ll(work_b(:,:),worka2,nlon_regional,nlat_regional,nloncase,nlatcase,grid_reverse_flag) !!!!!!!! analysis_inc: work_a !!!!!!!!!!!!!!!! work_a(:,:)=work_a(:,:)-worka2(:,:) call fv3_ll_to_h(work_a(:,:),workb2,nloncase,nlatcase,nlon_regional,nlat_regional,grid_reverse_flag) work_b(:,:)=work_b(:,:)+workb2(:,:) - else + else call fv3_ll_to_h(work_a(:,:),work_b(:,:),nloncase,nlatcase,nlon_regional,nlat_regional,grid_reverse_flag) - endif + endif endif if(fv3_io_layout_y > 1) then do nio=0,fv3_io_layout_y-1 @@ -3723,7 +4251,20 @@ subroutine gsi_fv3ncdf_write(grd_ionouv,cstate_nouv,add_saved,filenamein,fv3file deallocate(work_b_layout) enddo else - call check( nf90_put_var(gfile_loc,VarId,work_b, start = startloc, count = countloc) ) + if( trim(varname) == 'ref_f3d' )then + if(phy_smaller_domain)then + work_b_tmp = work_b(4:nxcase-3,4:nycase-3) + else + work_b_tmp = work_b(1:nxcase,1:nycase) + end if + where(work_b_tmp < 0.0_r_kind) + work_b_tmp = 0.0_r_kind + end where + call check( nf90_put_var(gfile_loc,VarId,work_b_tmp, start = startloc_tmp, count = countloc_tmp) ) + deallocate(work_b_tmp) + else + call check( nf90_put_var(gfile_loc,VarId,work_b, start = startloc, count = countloc) ) + end if endif enddo !ilevtotl loop @@ -4443,6 +4984,8 @@ subroutine getfv3lamfilevname(vgsinamein,fv3filenamegref,filenameout,vname) filenameout=fv3filenamegref%dynvars else if(ifindstrloc(vartracers,vgsinamein)> 0 ) then filenameout=fv3filenamegref%tracers + else if(ifindstrloc(varphyvars,vgsinamein)> 0) then + filenameout=fv3filenamegref%phyvars else write(6,*)'the filename corresponding to var ',trim(vgsinamein),' is not found, stop ' call stop2(333) diff --git a/src/gsi/gsimod.F90 b/src/gsi/gsimod.F90 index 55d9298614..8ccef7c38e 100644 --- a/src/gsi/gsimod.F90 +++ b/src/gsi/gsimod.F90 @@ -151,7 +151,9 @@ module gsimod readin_beta,use_localization_grid,use_gfs_ens,q_hyb_ens,i_en_perts_io, & l_ens_in_diff_time,ensemble_path,ens_fast_read,sst_staticB,limqens, & ntotensgrp,nsclgrp,naensgrp,ngvarloc,ntlevs_ens,naensloc, & - i_ensloccov4tim,i_ensloccov4var,i_ensloccov4scl,l_timloc_opt + 
i_ensloccov4tim,i_ensloccov4var,i_ensloccov4scl,l_timloc_opt,& + vdl_scale,vloc_varlist,& + global_spectral_filter_sd,assign_vdl_nml,parallelization_over_ensmembers use hybrid_ensemble_parameters,only : l_both_fv3sar_gfs_ens,n_ens_gfs,n_ens_fv3sar use rapidrefresh_cldsurf_mod, only: init_rapidrefresh_cldsurf, & dfi_radar_latent_heat_time_period,metar_impact_radius,& @@ -1375,6 +1377,36 @@ module gsimod ! i_ensloccov4scl - flag of cross-scale localization ! =0: cross-scale covariance is retained ! =1: cross-scale covariance is zero +! +! global_spectral_filter_sd - if true, use spectral filter function for +! scale decomposition in the global application (Huang et al. 2021) +! assign_vdl_nml - if true, vdl_scale and vloc_varlist will be used for +! assigning variable-dependent localization (VDL) on top of scale-dependent localization (SDL) in gsiparm.anl. +! This method, described in Wang and Wang (2022, JAMES), is +! functionally equivalent to, but implemented differently from, +! the method associated with the parameter i_ensloccov4var. +! vloc_varlist - list of control variables using the same localization length, +! effective only with assign_vdl_nml=.true. For example, +! vloc_varlist(1,:) = 'sf','vp','ps','t', +! vloc_varlist(2,:) = 'q', +! vloc_varlist(3,:) = 'qr','qs','qg','dbz','w','ql','qi', +! vloc_varlist(4,:) = 'sf','vp','ps','t','q', +! vloc_varlist(5,:) = 'qr','qs','qg','dbz','w','ql','qi', +! This example indicates that 3 variable-groups will be adopted for VDL on the first scale (L1). +! 'sf','vp','ps','t' will share the same localization length of v1L1; +! 'q' will have the localization length of v2L1 +! 'qr','qs','qg','dbz','w','ql','qi', use the same localization length of v3L1 +! +! For L2, a different configuration of VDL can be applied: +! ~~~~~~~~~ +! 'sf','vp','ps','t','q' will share the same localization length of v1L2; +! 'qr','qs','qg','dbz','w','ql','qi', use the same localization length of v2L2 +! vdl_scale - number of variable-groups on each scale (one entry per localization length), effective only with assign_vdl_nml=.true. +! if 3 variable-groups are set for the first scale and 2 for the second, +! vdl_scale = 3, 3, 3, 2, 2 +! ^ ^ ^ ^ ^ +! s_ens_h = v1L1 v2L1 v3L1 v1L2 v2L2 +! Then localization lengths will be assigned as above. ! namelist/hybrid_ensemble/l_hyb_ens,uv_hyb_ens,q_hyb_ens,aniso_a_en,generate_ens,n_ens,l_both_fv3sar_gfs_ens,n_ens_gfs,n_ens_fv3sar,nlon_ens,nlat_ens,jcap_ens,& pseudo_hybens,merge_two_grid_ensperts,regional_ensemble_option,fv3sar_bg_opt,fv3sar_ensemble_opt,full_ensemble,pwgtflg,& @@ -1382,7 +1414,9 @@ module gsimod grid_ratio_ens, & oz_univ_static,write_ens_sprd,use_localization_grid,use_gfs_ens, & i_en_perts_io,l_ens_in_diff_time,ensemble_path,ens_fast_read,sst_staticB,limqens, & - nsclgrp,l_timloc_opt,ngvarloc,naensloc,i_ensloccov4tim,i_ensloccov4var,i_ensloccov4scl + nsclgrp,l_timloc_opt,ngvarloc,naensloc,i_ensloccov4tim,i_ensloccov4var,i_ensloccov4scl,& + vdl_scale,vloc_varlist,& + global_spectral_filter_sd,assign_vdl_nml,parallelization_over_ensmembers ! rapidrefresh_cldsurf (options for cloud analysis and surface ! enhancement for RR appilcation ): @@ -1839,7 +1873,9 @@ subroutine gsimain_initialize else naensgrp=ntotensgrp endif - if(naensloc 1)then + allocate(spc_multwgt(0:jcap_ens,nsclgrp)) + allocate(spcwgt_params(4,nsclgrp)) + spc_multwgt=1.0 + + ! The below parameters are used in Huang et al.
(2021, MWR) + spcwgt_params(1,1)=4000.0_r_kind + spcwgt_params(2,1)=100000000.0_r_kind + spcwgt_params(3,1)=1.0_r_kind + spcwgt_params(4,1)=3000.0_r_kind + + if( nsclgrp >=3 )then + spcwgt_params(1,3)=0.0_r_kind + spcwgt_params(2,3)=500.0_r_kind + spcwgt_params(3,3)=1.0_r_kind + spcwgt_params(4,3)=500.0_r_kind + end if + + call init_mult_spc_wgts(jcap_ens) + + end if + return end subroutine hybens_grid_setup @@ -4148,6 +4171,8 @@ subroutine hybens_localization_setup ! 2012-10-16 wu - only call setup_ens_wgt if necessary ! 2014-05-22 wu modification to allow vertically varying localization scales in regional ! 2022-09-15 yokota - add scale/variable/time-dependent localization +! 2022-12-09 Y. Wang and X. Wang - add a variable-dependent localization option (assign_vdl_nml=.true.), +! poc: xuguang.wang@ou.edu ! ! input argument list: ! @@ -4168,9 +4193,11 @@ subroutine hybens_localization_setup use hybrid_ensemble_parameters, only: readin_beta,beta_s,beta_e,beta_s0,beta_e0,sqrt_beta_s,sqrt_beta_e use hybrid_ensemble_parameters, only: readin_localization,create_hybens_localization_parameters, & vvlocal,s_ens_h,s_ens_hv,s_ens_v,s_ens_vv - use hybrid_ensemble_parameters, only: ntotensgrp,naensgrp,naensloc,ntlevs_ens,nsclgrp - use hybrid_ensemble_parameters, only: en_perts + use hybrid_ensemble_parameters, only: ntotensgrp,naensgrp,naensloc,ntlevs_ens,nsclgrp,assign_vdl_nml + use hybrid_ensemble_parameters, only: en_perts,vdl_scale,vloc_varlist,global_spectral_filter_sd + use hybrid_ensemble_parameters, only: ngvarloc use gsi_io, only: verbose + use string_utility, only: StrLowCase implicit none @@ -4183,8 +4210,11 @@ subroutine hybens_localization_setup real(r_kind),allocatable:: s_ens_h_gu_x(:,:),s_ens_h_gu_y(:,:) logical :: l_read_success type(gsi_bundle) :: a_en(n_ens) + type(gsi_bundle) :: en_pertstmp(n_ens,ntlevs_ens) + type(gsi_bundle) :: en_pertstmp1(n_ens,ntlevs_ens) type(gsi_grid) :: grid_ens real(r_kind), pointer :: values(:) => NULL() + integer(i_kind) :: iscl, iv, smooth_scales_num character(len=*),parameter::myname_=myname//'*hybens_localization_setup' l_read_success=.false. @@ -4312,7 +4342,7 @@ subroutine hybens_localization_setup call init_sf_xy(jcap_ens) endif - if(ntotensgrp>1) then + if(ntotensgrp>1 .and. (.not. global_spectral_filter_sd)) then call gsi_bundlegetpointer(en_perts(1,1,1),cvars3d,ipc3d,istatus) if(istatus/=0) then write(6,*) myname_,': cannot find 3d pointers' @@ -4326,64 +4356,159 @@ subroutine hybens_localization_setup if(nsclgrp>1) then call gsi_gridcreate(grid_ens,grd_ens%lat2,grd_ens%lon2,grd_ens%nsig) allocate(values(grd_ens%latlon11*grd_ens%nsig*n_ens)) - do ig=1,nsclgrp-1 - ii=0 - do n=1,n_ens - a_en(n)%values => values(ii+1:ii+grd_ens%latlon11*grd_ens%nsig) - call gsi_bundleset(a_en(n),grid_ens,'Ensemble Bundle',istatus,names3d=(/'a_en'/),bundle_kind=r_kind) - if (istatus/=0) then - write(6,*) myname_,': error alloc(ensemble bundle)' - call stop2(999) - endif - ii=ii+grd_ens%latlon11*grd_ens%nsig - enddo - do m=1,ntlevs_ens + if( .not. 
assign_vdl_nml )then + do ig=1,nsclgrp-1 + ii=0 do n=1,n_ens - en_perts(n,ig+1,m)%valuesr4=en_perts(n,ig,m)%valuesr4 + a_en(n)%values => values(ii+1:ii+grd_ens%latlon11*grd_ens%nsig) + call gsi_bundleset(a_en(n),grid_ens,'Ensemble Bundle',istatus,names3d=(/'a_en'/),bundle_kind=r_kind) + if (istatus/=0) then + write(6,*) myname_,': error alloc(ensemble bundle)' + call stop2(999) + endif + ii=ii+grd_ens%latlon11*grd_ens%nsig enddo - do ic3=1,nc3d - ipic=ipc3d(ic3) + do m=1,ntlevs_ens do n=1,n_ens - do k=1,grd_ens%nsig - a_en(n)%r3(1)%q(:,:,k)=en_perts(n,ig,m)%r3(ipic)%qr4(:,:,k) + en_perts(n,ig+1,m)%valuesr4=en_perts(n,ig,m)%valuesr4 + enddo + do ic3=1,nc3d + ipic=ipc3d(ic3) + do n=1,n_ens + do k=1,grd_ens%nsig + a_en(n)%r3(1)%q(:,:,k)=en_perts(n,ig,m)%r3(ipic)%qr4(:,:,k) + enddo + enddo + call bkgcov_a_en_new_factorization(naensgrp+ig,a_en) + do n=1,n_ens + do k=1,grd_ens%nsig + en_perts(n,ig,m)%r3(ipic)%qr4(:,:,k)=a_en(n)%r3(1)%q(:,:,k) + enddo enddo enddo - call bkgcov_a_en_new_factorization(naensgrp+ig,a_en) - do n=1,n_ens - do k=1,grd_ens%nsig - en_perts(n,ig,m)%r3(ipic)%qr4(:,:,k)=a_en(n)%r3(1)%q(:,:,k) + do ic2=1,nc2d + ipic=ipc2d(ic2) + do n=1,n_ens + do k=1,grd_ens%nsig + a_en(n)%r3(1)%q(:,:,k)=en_perts(n,ig,m)%r2(ipic)%qr4(:,:) + enddo + enddo + call bkgcov_a_en_new_factorization(naensgrp+ig,a_en) + do n=1,n_ens + en_perts(n,ig,m)%r2(ipic)%qr4(:,:)=a_en(n)%r3(1)%q(:,:,1) enddo enddo - enddo - do ic2=1,nc2d - ipic=ipc2d(ic2) do n=1,n_ens - do k=1,grd_ens%nsig - a_en(n)%r3(1)%q(:,:,k)=en_perts(n,ig,m)%r2(ipic)%qr4(:,:) + en_perts(n,ig+1,m)%valuesr4=en_perts(n,ig+1,m)%valuesr4-en_perts(n,ig,m)%valuesr4 + enddo + enddo + do n=1,n_ens + call gsi_bundleunset(a_en(n),istatus) + enddo + enddo + else ! assign_vdl_nml + smooth_scales_num = naensloc - naensgrp + ngvarloc = 1 ! forced to 1 in this option + do n = 1, n_ens + do m = 1, ntlevs_ens + call gsi_bundlecreate(en_pertstmp(n,m),grid_ens,'ensemble2',istatus,names2d=cvars2d,names3d=cvars3d,bundle_kind=r_single) + call gsi_bundlecreate(en_pertstmp1(n,m),grid_ens,'ensemble1',istatus,names2d=cvars2d,names3d=cvars3d,bundle_kind=r_single) + end do + end do + ig = 1 + do iscl=1,smooth_scales_num + 1 + ii=0 + do n=1,n_ens + a_en(n)%values => values(ii+1:ii+grd_ens%latlon11*grd_ens%nsig) + call gsi_bundleset(a_en(n),grid_ens,'Ensemble Bundle',istatus,names3d=(/'a_en'/),bundle_kind=r_kind) + if (istatus/=0) then + write(6,*) myname_,': error alloc(ensemble bundle)' + call stop2(999) + endif + ii=ii+grd_ens%latlon11*grd_ens%nsig + enddo + + do m=1,ntlevs_ens + if( ig == 1 )then + do n=1,n_ens + en_pertstmp(n,m)%valuesr4=en_perts(n,ig,m)%valuesr4 + enddo + end if + do ic3=1,nc3d + ipic=ipc3d(ic3) + do n=1,n_ens + do k=1,grd_ens%nsig + a_en(n)%r3(1)%q(:,:,k)=en_pertstmp(n,m)%r3(ipic)%qr4(:,:,k) + enddo + enddo + if(iscl <= smooth_scales_num) call bkgcov_a_en_new_factorization(naensgrp+iscl,a_en) + do n=1,n_ens + do k=1,grd_ens%nsig + en_pertstmp1(n,m)%r3(ipic)%qr4(:,:,k)=a_en(n)%r3(1)%q(:,:,k) + if( vdl_scale(ig) == 0 )then + en_perts(n,ig,m)%r3(ipic)%qr4(:,:,k)=a_en(n)%r3(1)%q(:,:,k) + else ! 
VDL is activated + do iv = 1, vdl_scale(ig) + en_perts(n,ig+iv-1,m)%r3(ipic)%qr4(:,:,k)=0.0_r_single + if( any( trim(StrLowCase(cvars3d(ic3))) == vloc_varlist(ig+iv-1,:) ) ) then + en_perts(n,ig+iv-1,m)%r3(ipic)%qr4(:,:,k)=a_en(n)%r3(1)%q(:,:,k) + end if + end do + end if + enddo + enddo + enddo + do ic2=1,nc2d + ipic=ipc2d(ic2) + do n=1,n_ens + do k=1,grd_ens%nsig + a_en(n)%r3(1)%q(:,:,k)=en_pertstmp(n,m)%r2(ipic)%qr4(:,:) + enddo + enddo + if(iscl <= smooth_scales_num) call bkgcov_a_en_new_factorization(naensgrp+iscl,a_en) + do n=1,n_ens + en_pertstmp1(n,m)%r2(ipic)%qr4(:,:)=a_en(n)%r3(1)%q(:,:,1) + if( vdl_scale(ig) == 0 )then + en_perts(n,ig,m)%r2(ipic)%qr4(:,:)=a_en(n)%r3(1)%q(:,:,1) + else ! VDL is activated + do iv = 1, vdl_scale(ig) + en_perts(n,ig+iv-1,m)%r2(ipic)%qr4(:,:)=0.0_r_single + if( any( trim(StrLowCase(cvars2d(ic2))) == vloc_varlist(ig+iv-1,:) ) ) then + en_perts(n,ig+iv-1,m)%r2(ipic)%qr4(:,:)=a_en(n)%r3(1)%q(:,:,1) + end if + end do + end if enddo enddo - call bkgcov_a_en_new_factorization(naensgrp+ig,a_en) do n=1,n_ens - en_perts(n,ig,m)%r2(ipic)%qr4(:,:)=a_en(n)%r3(1)%q(:,:,1) + en_pertstmp(n,m)%valuesr4=en_pertstmp(n,m)%valuesr4-en_pertstmp1(n,m)%valuesr4 enddo enddo do n=1,n_ens - en_perts(n,ig+1,m)%valuesr4=en_perts(n,ig+1,m)%valuesr4-en_perts(n,ig,m)%valuesr4 + call gsi_bundleunset(a_en(n),istatus) enddo + if( vdl_scale(ig) == 0 )then + ig = ig + 1 + else + ig = ig + vdl_scale(ig) + end if enddo do n=1,n_ens - call gsi_bundleunset(a_en(n),istatus) - enddo - enddo - deallocate(values) - endif - do ig=nsclgrp+1,ntotensgrp - do m=1,ntlevs_ens - do n=1,n_ens - en_perts(n,ig,m)%valuesr4=en_perts(n,ig-nsclgrp,m)%valuesr4 - enddo - enddo - enddo + do m=1,ntlevs_ens + call gsi_bundledestroy(en_pertstmp(n,m),istatus) + call gsi_bundledestroy(en_pertstmp1(n,m),istatus) + end do + end do + end if + deallocate(values) + endif + do ig=nsclgrp+1,ntotensgrp + do m=1,ntlevs_ens + do n=1,n_ens + en_perts(n,ig,m)%valuesr4=en_perts(n,ig-nsclgrp,m)%valuesr4 + enddo + enddo + enddo endif !!!!!!!! setup beta_s, beta_e!!!!!!!!!!!! diff --git a/src/gsi/hybrid_ensemble_parameters.f90 b/src/gsi/hybrid_ensemble_parameters.f90 index 7b1c963764..17416f68fb 100644 --- a/src/gsi/hybrid_ensemble_parameters.f90 +++ b/src/gsi/hybrid_ensemble_parameters.f90 @@ -128,6 +128,7 @@ module hybrid_ensemble_parameters ! function of z, default = .false. ! ensemble_path: path to ensemble members; default './' ! ens_fast_read: read ensemble in parallel; default '.false.' +! parallelization_over_ensmembers: parallelly read ensemble members for FV3-LAM; default '.false' ! sst_staticB: if .true. (default) uses only static part of B error covariance for SST ! nsclgrp: number of scale-dependent localization lengths ! 
l_timloc_opt: if true, then turn on time-dependent localization @@ -327,10 +328,17 @@ module hybrid_ensemble_parameters public :: i_ensloccov4tim,i_ensloccov4var,i_ensloccov4scl public :: idaen3d,idaen2d public :: ens_fast_read + public :: parallelization_over_ensmembers public :: l_both_fv3sar_gfs_ens public :: sst_staticB public :: limqens + public :: spc_multwgt + public :: spcwgt_params + public :: vdl_scale,vloc_varlist + public :: global_spectral_filter_sd + public :: assign_vdl_nml + logical l_hyb_ens,uv_hyb_ens,q_hyb_ens,oz_univ_static,sst_staticB logical l_timloc_opt logical aniso_a_en @@ -348,6 +356,7 @@ module hybrid_ensemble_parameters logical vvlocal logical l_ens_in_diff_time logical ens_fast_read + logical parallelization_over_ensmembers logical l_both_fv3sar_gfs_ens integer(i_kind) i_en_perts_io integer(i_kind) n_ens,nlon_ens,nlat_ens,jcap_ens,jcap_ens_test @@ -389,6 +398,13 @@ module hybrid_ensemble_parameters integer(i_kind) :: i_ensloccov4scl=0 integer(i_kind),allocatable,dimension(:) :: idaen3d,idaen2d + real(r_kind),allocatable,dimension(:,:) :: spc_multwgt + real(r_kind),allocatable,dimension(:,:) :: spcwgt_params + character(len=3) vloc_varlist(max_naensloc,max_nvars) + integer(i_kind) vdl_scale(max_naensloc) + logical :: global_spectral_filter_sd + logical :: assign_vdl_nml + ! following is for storage of ensemble perturbations: ! def en_perts - array of ensemble perturbations @@ -476,10 +492,15 @@ subroutine init_hybrid_ensemble_parameters i_en_perts_io=0 ! default for en_pert IO. 0 is no IO ensemble_path = './' ! default for path to ensemble members ens_fast_read=.false. + parallelization_over_ensmembers=.false. limqens=1.0_r_single ! default for limiting ensemble RH (+/-) l_both_fv3sar_gfs_ens=.false. n_ens_gfs=0 n_ens_fv3sar=0 + vdl_scale = 0 + vloc_varlist = 'aaa' + global_spectral_filter_sd=.false. + assign_vdl_nml=.false. end subroutine init_hybrid_ensemble_parameters diff --git a/src/gsi/obsmod.F90 b/src/gsi/obsmod.F90 index 1fb03d0940..3dd936d94e 100644 --- a/src/gsi/obsmod.F90 +++ b/src/gsi/obsmod.F90 @@ -747,7 +747,7 @@ subroutine init_obsmod_dflts if_vterminal=.false. l2rwthin =.false. if_vrobs_raw=.false. - if_model_dbz=.true. + if_model_dbz=.false. inflate_obserr=.false. whichradar="KKKK" diff --git a/src/gsi/read_dbz_nc.f90 b/src/gsi/read_dbz_nc.f90 index 89eebde8b6..ee1d3cb2e4 100644 --- a/src/gsi/read_dbz_nc.f90 +++ b/src/gsi/read_dbz_nc.f90 @@ -71,7 +71,7 @@ subroutine read_dbz_nc(nread,ndata,nodata,infile,lunout,obstype,sis,hgtl_full,no one_tenth,r1000,r60,r60inv,r100,r400,grav_equator, & eccentricity,somigliana,grav_ratio,grav,semi_major_axis,flattening use gridmod, only: tll2xy,nsig,nlat,nlon - use obsmod, only: iadate,doradaroneob, & + use obsmod, only: iadate,doradaroneob,oneoblat,oneoblon,oneobheight,oneobradid, & mintiltdbz,maxtiltdbz,minobrangedbz,maxobrangedbz,& static_gsi_nopcp_dbz,rmesh_dbz,zmesh_dbz use hybrid_ensemble_parameters,only : l_hyb_ens @@ -380,6 +380,12 @@ subroutine read_dbz_nc(nread,ndata,nodata,infile,lunout,obstype,sis,hgtl_full,no thislon = lon(i,j) thislat = lat(i,j) + + if(doradaroneob) then + thislat=oneoblat + thislon=oneoblon + thishgt=oneobheight + endif !-Check format of longitude and correct if necessary diff --git a/src/gsi/setupdbz.f90 b/src/gsi/setupdbz.f90 index 9bbf5ed34b..1e158de9ea 100644 --- a/src/gsi/setupdbz.f90 +++ b/src/gsi/setupdbz.f90 @@ -1451,31 +1451,37 @@ subroutine setupdbz(obsLL,odiagLL,lunin,mype,bwork,awork,nele,nobs,is,radardbz_d ! 
Write information to diagnostic file if(radardbz_diagsave .and. ii>0 )then - write(string,600) jiter -600 format('radardbz_',i2.2) - diag_file=trim(dirname) // trim(string) - if(init_pass) then - open(newunit=lu_diag,file=trim(diag_file),form='unformatted',status='unknown',position='rewind') + if( .not. l_use_dbz_directDA )then + write(7)'dbz',nchar,nreal,ii,mype,ioff0 + write(7)cdiagbuf(1:ii),rdiagbuf(:,1:ii) + deallocate(cdiagbuf,rdiagbuf) else - inquire(file=trim(diag_file),exist=diagexist) - if (diagexist) then - open(lu_diag,file=trim(diag_file),form='unformatted',status='old',position='append') + write(string,600) jiter +600 format('radardbz_',i2.2) + diag_file=trim(dirname) // trim(string) + if(init_pass) then + open(newunit=lu_diag,file=trim(diag_file),form='unformatted',status='unknown',position='rewind') else - open(lu_diag,file=trim(diag_file),form='unformatted',status='unknown',position='rewind') + inquire(file=trim(diag_file),exist=diagexist) + if (diagexist) then + open(lu_diag,file=trim(diag_file),form='unformatted',status='old',position='append') + else + open(lu_diag,file=trim(diag_file),form='unformatted',status='unknown',position='rewind') + endif + endif + if(init_pass .and. mype == 0) then + if ( .not. l_use_dbz_directDA ) then ! EnKF uses these diagnostics and EnKF uses single OBS file for now. + write(lu_diag) ianldate ! So do not write analysis date for binary in case of using direct reflectivity DA. + end if + write(6,*)'SETUPDBZ: write time record to file ',& + trim(diag_file), ' ',ianldate endif - endif - if(init_pass .and. mype == 0) then - if ( .not. l_use_dbz_directDA ) then ! EnKF uses these diagnostics and EnKF uses single OBS file for now. - write(lu_diag) ianldate ! So do not write analysis date for binary in case of using direct reflectivity DA. - end if - write(6,*)'SETUPDBZ: write time record to file ',& - trim(diag_file), ' ',ianldate - endif - write(lu_diag)'dbz',nchar,nreal,ii,mype,ioff0 - write(lu_diag)cdiagbuf(1:ii),rdiagbuf(:,1:ii) - deallocate(cdiagbuf,rdiagbuf) - close(lu_diag) + write(lu_diag)'dbz',nchar,nreal,ii,mype,ioff0 + write(lu_diag)cdiagbuf(1:ii),rdiagbuf(:,1:ii) + deallocate(cdiagbuf,rdiagbuf) + close(lu_diag) + end if end if write(6,*)'mype, irefsmlobs,irejrefsmlobs are ',mype,' ',irefsmlobs, ' ',irejrefsmlobs ! close(52) !simulated obs diff --git a/src/gsi/update_guess.f90 b/src/gsi/update_guess.f90 index a90e7a19d6..e5a0f64245 100644 --- a/src/gsi/update_guess.f90 +++ b/src/gsi/update_guess.f90 @@ -287,7 +287,11 @@ subroutine update_guess(sval,sbias) ! since we don't know which comes first in met-guess, we ! must postpone updating tv after all other met-guess fields endif - icloud=getindex(cloud,guess(ic)) + if( allocated(cloud) )then + icloud=getindex(cloud,guess(ic)) + else + icloud=-999 + end if if ( .not. l_use_dbz_directDA ) then ! original code if(icloud>0) then ptr3dges = max(ptr3dges+ptr3dinc,zero) diff --git a/ush/sub_jet b/ush/sub_jet index 5bd9a6d68c..e11be1280c 100755 --- a/ush/sub_jet +++ b/ush/sub_jet @@ -88,7 +88,7 @@ output=${output:-$jobname.out} myuser=$LOGNAME myhost=$(hostname) -DATA=$regdir/regtests/data +DATA=${DATA:-$ptmp/tmp} mkdir -p $DATA @@ -117,6 +117,7 @@ echo "#SBATCH --time=$timew" echo "#SBATCH --nodes=$nodes --ntasks-per-node=$procs --cpus-per-task=$threads" >> $cfile #echo "#SBATCH -j oe" >> $cfile echo "#SBATCH --account=$accnt" >> $cfile +echo "#SBATCH --mem=0" >> $cfile echo "#SBATCH --partition=kjet" >> $cfile #echo "#SBATCH -V" >> $cfile #echo "#PBS -d" >> $cfile
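
For reference, the ref_f3d handling added above to gsi_fv3ncdf_read, gsi_fv3ncdf_read_ens_parallel_over_ens, and gsi_fv3ncdf_write clamps negative reflectivity to zero and, when phy_data.nc stores the field without the lateral halo, places it into the interior of the full work array. A minimal standalone sketch of that placement step follows; it is not GSI code, it assumes a 3-point halo on each side (hence the nxcase-6/nycase-6 sizes and the 4:nxcase-3 range), and the array sizes are placeholders.

program ref_f3d_halo_sketch
  implicit none
  integer, parameter :: r_kind = selected_real_kind(15)
  integer, parameter :: nxcase = 12, nycase = 10        ! placeholder full (haloed) grid size
  real(r_kind) :: uu2d(nxcase,nycase)                   ! full-size work array
  real(r_kind) :: uu2d_tmp(nxcase-6,nycase-6)           ! field as stored without the 3-point halo
  ! stand-in for values read with nf90_get_var; shift so some become negative
  call random_number(uu2d_tmp)
  uu2d_tmp = uu2d_tmp - 0.2_r_kind
  where (uu2d_tmp < 0.0_r_kind) uu2d_tmp = 0.0_r_kind   ! no negative reflectivity
  ! zero the full array, then place the smaller field inside the halo
  uu2d = 0.0_r_kind
  uu2d(4:nxcase-3,4:nycase-3) = uu2d_tmp
  write(*,*) 'interior mean =', sum(uu2d(4:nxcase-3,4:nycase-3))/size(uu2d_tmp)
end program ref_f3d_halo_sketch

The write path in gsi_fv3ncdf_write uses the same index range in the opposite direction (work_b_tmp = work_b(4:nxcase-3,4:nycase-3)) before nf90_put_var, so halo points are never written back to the physics file.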
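
The vdl_scale/vloc_varlist documentation added to gsimod.F90 above pairs each variable-group with one entry of s_ens_h, scale by scale. A gsiparm.anl fragment for that worked example is sketched below; the vdl_scale and vloc_varlist entries mirror the comment block, while the s_ens_h values and the omission of the other &hybrid_ensemble settings are illustrative assumptions, not values taken from this patch.

&hybrid_ensemble
   assign_vdl_nml=.true.,
   vdl_scale= 3, 3, 3, 2, 2,
   vloc_varlist(1,:)='sf','vp','ps','t',
   vloc_varlist(2,:)='q',
   vloc_varlist(3,:)='qr','qs','qg','dbz','w','ql','qi',
   vloc_varlist(4,:)='sf','vp','ps','t','q',
   vloc_varlist(5,:)='qr','qs','qg','dbz','w','ql','qi',
   s_ens_h= 300., 150., 30., 600., 60.,
/

Read column-wise, entries 1-3 of s_ens_h are the first-scale lengths for the three variable-groups (v1L1, v2L1, v3L1) and entries 4-5 are the second-scale lengths for the two groups defined for L2 (v1L2, v2L2).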
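
When parallelization_over_ensmembers is set, gsi_fv3ncdf_read_ens_parallel_over_ens above lets one PE (iope) read a whole member, and its optional dummy arguments select which variable list is pulled from the dynvars, tracer, or phy_data file. The toy program below isolates that present()-based dispatch pattern only; the name read_group, the fixed-size arrays, and the shortened variable lists are invented for the sketch and are not GSI interfaces.

program optional_dispatch_sketch
  implicit none
  real :: tsen3d(4,4,2), q3d(4,4,2), dbz3d(4,4,2)
  call read_group(tsen=tsen3d)   ! would read the dynvars list (T, delp, ...)
  call read_group(q=q3d)         ! would read the tracer list (sphum, o3mr, ...)
  call read_group(dbz=dbz3d)     ! would read the phyvars list (ref_f3d)
contains
  subroutine read_group(tsen,q,dbz)
    ! which optional output is present selects the set of netCDF variables to read
    real, intent(out), optional :: tsen(:,:,:), q(:,:,:), dbz(:,:,:)
    character(len=8), allocatable :: varlist(:)
    if (present(tsen)) varlist = [character(len=8) :: 'T','delp']
    if (present(q))    varlist = [character(len=8) :: 'sphum','o3mr']
    if (present(dbz))  varlist = [character(len=8) :: 'ref_f3d']
    write(*,*) 'would read: ', varlist
    if (present(tsen)) tsen = 0.0
    if (present(q))    q    = 0.0
    if (present(dbz))  dbz  = 0.0
  end subroutine read_group
end program optional_dispatch_sketch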