From aa656c6ee23d9a8d253be3dad95615b29ac0d033 Mon Sep 17 00:00:00 2001 From: Jeff Whitaker Date: Fri, 22 Sep 2023 08:05:11 -0600 Subject: [PATCH 1/2] fix for zero total ozone pressure in EnKF (issue #625) (#626) The fix involves setting the pressure to 0.001Pa for ozone obs that have zero pressure (to avoid Inf when log(p) calculated), and turning off ob-space vertical localization. Has no effect on current operational setup which uses model-space vertical localization (modelspace_vloc=T). --------- Co-authored-by: jswhit2 --- src/enkf/enkf_obsmod.f90 | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/enkf/enkf_obsmod.f90 b/src/enkf/enkf_obsmod.f90 index ba4b2946b1..ea8f6446fb 100644 --- a/src/enkf/enkf_obsmod.f90 +++ b/src/enkf/enkf_obsmod.f90 @@ -262,7 +262,6 @@ subroutine readobs() allocate(corrlengthsq(nobstot),lnsigl(nobstot),obtimel(nobstot)) lnsigl=1.e10 do nob=1,nobstot - oblnp(nob) = -log(obpress(nob)) ! distance measured in log(p) units if (obloclon(nob) < zero) obloclon(nob) = obloclon(nob) + 360._r_single radlon=deg2rad*obloclon(nob) radlat=deg2rad*obloclat(nob) @@ -283,6 +282,13 @@ subroutine readobs() lnsigl(nob)=latval(deglat,lnsigcutoffnh,lnsigcutofftr,lnsigcutoffsh) end if endif + ! total column ozone has pressure set to zero, set to 0.001Pa + ! and turn vertical localization off (no effect if modelspace_vloc=T) + if (obpress(nob) < 0.001 .and. obtype(nob)(1:3) .eq. ' oz') then + lnsigl(nob) = 1.e30 ! turn ob-space vert localization off + obpress(nob) = 0.001 ! set to a non-zero value + endif + oblnp(nob) = -log(obpress(nob)) ! distance measured in log(p) units corrlengthsq(nob)=latval(deglat,corrlengthnh,corrlengthtr,corrlengthsh)**2 if ( (obtype(nob)(1:3) == 'dbz' .or. obtype(nob)(1:3) == ' rw') .and. l_use_enkf_directZDA ) then corrlengthsq(nob)=latval(deglat,corrlengthrdrnh,corrlengthrdrtr,corrlengthrdrsh)**2 From 2f4e7fe8124603c6ac8c14ed2cf1aa393d6ec07d Mon Sep 17 00:00:00 2001 From: jderber-NOAA <75998838+jderber-NOAA@users.noreply.github.com> Date: Fri, 22 Sep 2023 12:17:22 -0400 Subject: [PATCH 2/2] Optimize the reading of ensembles and setup for global multiscale runs (#594) This update improves the efficiency of the GSI, especially for multiscale runs. Details can be found in issue #585 --- src/gsi/apply_scaledepwgts.f90 | 152 ++++++--- src/gsi/control_vectors.f90 | 8 +- src/gsi/cplr_gfs_ensmod.f90 | 263 ++++++++-------- src/gsi/general_specmod.f90 | 31 -- src/gsi/general_spectral_transforms.f90 | 4 +- src/gsi/general_sub2grid_mod.f90 | 167 ++++++++++ src/gsi/get_gefs_ensperts_dualres.f90 | 402 +++++++++++++----------- src/gsi/hybrid_ensemble_isotropic.F90 | 265 ++++++++-------- src/gsi/read_iasi.f90 | 6 +- src/gsi/read_prepbufr.f90 | 6 +- src/gsi/setupcldtot.F90 | 19 +- src/gsi/setuprad.f90 | 10 +- src/gsi/stpcalc.f90 | 3 +- 13 files changed, 790 insertions(+), 546 deletions(-) diff --git a/src/gsi/apply_scaledepwgts.f90 b/src/gsi/apply_scaledepwgts.f90 index e97b6fb614..e4952b28fa 100644 --- a/src/gsi/apply_scaledepwgts.f90 +++ b/src/gsi/apply_scaledepwgts.f90 @@ -16,18 +16,18 @@ function fwgtofwvlen (rvlft,rvrgt,rcons,rlen,rinput) ! !$$$ end documentation block - use kinds, only: r_kind,i_kind,r_single + use kinds, only: r_kind implicit none real(r_kind),intent(in) :: rvlft,rvrgt,rcons,rlen,rinput real(r_kind) :: fwgtofwvlen real(r_kind) :: rlen1,rtem1,rconshalf - rlen1=rlen/10.0_r_kind ! rlen corresponds to a (-5,5) region - rconshalf=0.5_r_kind*rcons if(rinput > rvlft .and. 
rinput < rvrgt) then fwgtofwvlen=rcons else + rlen1=rlen/10.0_r_kind ! rlen corresponds to a (-5,5) region + rconshalf=0.5_r_kind*rcons rtem1=min(abs(rinput-rvlft),abs(rinput-rvrgt)) fwgtofwvlen=rconshalf*(1.0_r_kind+tanh(5.0_r_kind-rtem1/rlen1)) endif @@ -41,23 +41,21 @@ subroutine init_mult_spc_wgts(jcap_in) ! !$$$ end documentation block - use kinds, only: r_kind,i_kind,r_single - use constants, only: zero,half,one,two,three,rearth,pi,tiny_r_kind + use kinds, only: r_kind,i_kind + use constants, only: zero,half,one,rearth,pi,tiny_r_kind use mpimod, only: mype - use general_sub2grid_mod, only: general_sub2grid_create_info - use egrid2agrid_mod,only: g_create_egrid2agrid - use general_sub2grid_mod, only: sub2grid_info use hybrid_ensemble_parameters, only: nsclgrp use hybrid_ensemble_parameters, only: spc_multwgt,spcwgt_params,r_ensloccov4scl implicit none integer(i_kind),intent(in ) :: jcap_in - integer(i_kind) i + integer(i_kind) i,l,ks integer(i_kind) ig - real(r_kind) rwv0,rtem1,rtem2 - real (r_kind):: fwgtofwvlen + real(r_kind) :: rwv0,rtem1,rtem2 + real(r_kind) :: fwgtofwvlen real(r_kind) :: totwvlength + real(r_kind),dimension(0:jcap_in,nsclgrp) :: spcwgt logical :: l_sum_spc_weights ! Spectral scale decomposition is differernt between SDL-cross and SDL-nocross @@ -67,9 +65,9 @@ subroutine init_mult_spc_wgts(jcap_in) l_sum_spc_weights = .true. end if - spc_multwgt(0,1)=one + spcwgt(0,1)=one do ig=2,nsclgrp - spc_multwgt(0,ig)=zero + spcwgt(0,ig)=zero end do @@ -79,13 +77,13 @@ subroutine init_mult_spc_wgts(jcap_in) rtem1=zero do ig=1,nsclgrp if(ig /= 2) then - spc_multwgt(i,ig)=fwgtofwvlen(spcwgt_params(1,ig),spcwgt_params(2,ig),& - spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength) - spc_multwgt(i,ig)=min(max(spc_multwgt(i,ig),zero),one) + spcwgt(i,ig)=fwgtofwvlen(spcwgt_params(1,ig),spcwgt_params(2,ig),& + spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength) + spcwgt(i,ig)=min(max(spcwgt(i,ig),zero),one) if(l_sum_spc_weights) then - rtem1=rtem1+spc_multwgt(i,ig) + rtem1=rtem1+spcwgt(i,ig) else - rtem1=rtem1+spc_multwgt(i,ig)*spc_multwgt(i,ig) + rtem1=rtem1+spcwgt(i,ig)*spcwgt(i,ig) endif endif enddo @@ -93,20 +91,52 @@ subroutine init_mult_spc_wgts(jcap_in) if(rtem2 >= zero) then if(l_sum_spc_weights) then - spc_multwgt(i,2)=rtem2 + spcwgt(i,2)=rtem2 else - spc_multwgt(i,2)=sqrt(rtem2) + spcwgt(i,2)=sqrt(rtem2) endif else - if(mype == 0)write(6,*) ' rtem2 < zero ',i,rtem2,(spc_multwgt(i,ig),ig=1,nsclgrp) - spc_multwgt(i,2)=zero + if(mype == 0)write(6,*) ' rtem2 < zero ',i,rtem2,(spcwgt(i,ig),ig=1,nsclgrp) + spcwgt(i,2)=zero endif enddo +!! Code borrowed from spvar in splib + + spc_multwgt = zero + do ig=1,nsclgrp + do i=0,jcap_in + ks=2*i + spc_multwgt(ks+1,ig)=spcwgt(i,ig) + end do + do i=0,jcap_in + do l=MAX(1,i-jcap_in),MIN(i,jcap_in) + ks=l*(2*jcap_in+1-l)+2*i + spc_multwgt(ks+1,ig) = spcwgt(i,ig) + spc_multwgt(ks+2,ig) = spcwgt(i,ig) + end do + end do + end do + return end subroutine init_mult_spc_wgts +subroutine destroy_mult_spc_wgts +!$$$ subprogram documentation block +! +! subprogram: destroy_mult_spc_wgts +! +!$$$ end documentation block + + use hybrid_ensemble_parameters, only: spc_multwgt,spcwgt_params + implicit none + + deallocate(spc_multwgt,spcwgt_params) -subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2) + return +end subroutine destroy_mult_spc_wgts + + +subroutine apply_scaledepwgts(m,grd_in,sp_in) ! ! Program history log: ! 2017-03-30 J. Kay, X. 
Wang - copied from Kleist's apply_scaledepwgts and @@ -115,49 +145,67 @@ subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2) ! use constants, only: one use control_vectors, only: control_vector - use kinds, only: r_kind,i_kind - use kinds, only: r_single - use general_specmod, only: general_spec_multwgt + use kinds, only: r_kind,i_kind,r_single use gsi_bundlemod, only: gsi_bundle use general_sub2grid_mod, only: general_sub2grid,general_grid2sub use general_specmod, only: spec_vars use general_sub2grid_mod, only: sub2grid_info - use mpimod, only: mpi_comm_world,mype + use hybrid_ensemble_parameters, only: spc_multwgt,en_perts,nsclgrp,n_ens + use mpimod, only: mype implicit none ! Declare passed variables - type(gsi_bundle),intent(in) :: wbundle - type(gsi_bundle),intent(inout) :: wbundle2 + integer,intent(in) :: m type(spec_vars),intent (in):: sp_in type(sub2grid_info),intent(in)::grd_in - real(r_kind),dimension(0:sp_in%jcap),intent(in):: spwgts ! Declare local variables - integer(i_kind) kk + integer(i_kind) kk,ig,n,ig2,i,j - real(r_kind),dimension(grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc) :: hwork - real(r_kind),dimension(grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc) :: work - real(r_kind),dimension(sp_in%nc):: spc1 - -! Beta1 first -! Get from subdomains to - call general_sub2grid(grd_in,wbundle%values,hwork) - work=reshape(hwork,(/grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc/)) - - do kk=1,grd_in%nlevs_alloc -! Transform from physical space to spectral space - call general_g2s0(grd_in,sp_in,spc1,work(:,:,kk)) - -! Apply spectral weights - call general_spec_multwgt(sp_in,spc1,spwgts) -! Transform back to physical space - call general_s2g0(grd_in,sp_in,spc1,work(:,:,kk)) + real(r_single),dimension(grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc,nsclgrp) :: hwork2 + real(r_kind),dimension(grd_in%nlat,grd_in%nlon) :: work + real(r_kind),dimension(sp_in%nc,grd_in%nlevs_alloc):: spc1 + real(r_kind),dimension(sp_in%nc):: spc2 + + do n=1,n_ens +! Get from subdomains to full grid + call general_sub2grid(grd_in,en_perts(n,1,m)%valuesr4(:),hwork2(:,:,:,1)) + +!$omp parallel do schedule(static,1) private(i,j,kk,work) + do kk=1,grd_in%nlevs_loc + do j=1,grd_in%nlon + do i=1,grd_in%nlat + work(i,j)=hwork2(i,j,kk,1) + end do + end do +! Transform from physical space to spectral space + call general_g2s0(grd_in,sp_in,spc1(1,kk),work) + + end do +!$omp parallel do schedule(static,1) private(kk,ig,ig2,i,j,work,spc2) + do ig2=1,nsclgrp*grd_in%nlevs_loc + ig=(ig2-1)/grd_in%nlevs_loc+1 + kk=ig2-(ig-1)*grd_in%nlevs_loc + + do i=1,sp_in%nc + spc2(i)=spc1(i,kk)*spc_multwgt(i,ig) + end do +! Apply spectral weights +! Transform back to physical space + call general_s2g0(grd_in,sp_in,spc2,work) + + do j=1,grd_in%nlon + do i=1,grd_in%nlat + hwork2(i,j,kk,ig)=work(i,j) + end do + end do + end do + do ig=1,nsclgrp +! Transfer work back to subdomains + call general_grid2sub(grd_in,hwork2(:,:,:,ig),en_perts(n,ig,m)%valuesr4(:)) + end do end do -! 
Transfer work back to subdomains - hwork=reshape(work,(/grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc/)) - call general_grid2sub(grd_in,hwork,wbundle2%values) - return end subroutine apply_scaledepwgts diff --git a/src/gsi/control_vectors.f90 b/src/gsi/control_vectors.f90 index 97578124d2..73f605b95f 100644 --- a/src/gsi/control_vectors.f90 +++ b/src/gsi/control_vectors.f90 @@ -897,21 +897,23 @@ real(r_quad) function qdot_prod_sub(xcv,ycv) itot=max(m3d,0)+max(m2d,0) if(l_hyb_ens)itot=itot+n_ens*naensgrp allocate(partsum(itot)) + partsum=zero_quad do ii=1,nsubwin !$omp parallel do schedule(dynamic,1) private(i) do i = 1,m3d - partsum(i) = dplevs(xcv%step(ii)%r3(i)%q,ycv%step(ii)%r3(i)%q,ihalo=1) + partsum(i) = partsum(i)+dplevs(xcv%step(ii)%r3(i)%q,ycv%step(ii)%r3(i)%q,ihalo=1) enddo !$omp parallel do schedule(dynamic,1) private(i) do i = 1,m2d - partsum(m3d+i) = dplevs(xcv%step(ii)%r2(i)%q,ycv%step(ii)%r2(i)%q,ihalo=1) + partsum(m3d+i) = partsum(m3d+i)+dplevs(xcv%step(ii)%r2(i)%q,ycv%step(ii)%r2(i)%q,ihalo=1) enddo if(l_hyb_ens) then do ig=1,naensgrp nigtmp=n_ens*(ig-1) !$omp parallel do schedule(dynamic,1) private(i) do i = 1,n_ens - partsum(m3d+m2d+nigtmp+i) = dplevs(xcv%aens(ii,ig,i)%r3(1)%q,ycv%aens(ii,ig,i)%r3(1)%q,ihalo=1) + partsum(m3d+m2d+nigtmp+i) = partsum(m3d+m2d+nigtmp+i) + & + dplevs(xcv%aens(ii,ig,i)%r3(1)%q,ycv%aens(ii,ig,i)%r3(1)%q,ihalo=1) end do end do end if diff --git a/src/gsi/cplr_gfs_ensmod.f90 b/src/gsi/cplr_gfs_ensmod.f90 index 550f85d209..6f7d1184c5 100644 --- a/src/gsi/cplr_gfs_ensmod.f90 +++ b/src/gsi/cplr_gfs_ensmod.f90 @@ -29,6 +29,7 @@ module get_gfs_ensmod_mod integer(i_kind) :: kas,kae,kasm,kaem,kaemz,mas,mae,masm,maem,maemz integer(i_kind) :: ibs,ibe,ibsm,ibem,ibemz,jbs,jbe,jbsm,jbem,jbemz integer(i_kind) :: kbs,kbe,kbsm,kbem,kbemz,mbs,mbe,mbsm,mbem,mbemz + integer(i_kind) :: icw,iql,iqi,iqr,iqs,iqg integer(i_kind) :: n2d type(genex_info) :: s_a2b @@ -180,13 +181,13 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & use gsi_4dvar, only: ens_fhrlevs use gsi_bundlemod, only: gsi_bundle use gsi_bundlemod, only : assignment(=) - use hybrid_ensemble_parameters, only: n_ens,grd_ens + use hybrid_ensemble_parameters, only: n_ens,grd_ens,ntlevs_ens use hybrid_ensemble_parameters, only: ensemble_path - use control_vectors, only: nc2d,nc3d - !use control_vectors, only: cvars2d,cvars3d + use control_vectors, only: cvars2d,cvars3d,nc2d,nc3d use genex_mod, only: genex_create_info,genex,genex_destroy_info use gridmod, only: use_gfs_nemsio use jfunc, only: cnvw_option + use mpeu_util, only: getindex implicit none @@ -206,9 +207,9 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & integer(i_kind) :: io_pe,n_io_pe_s,n_io_pe_e,n_io_pe_em,i_ens integer(i_kind) :: ip integer(i_kind) :: nlon,nlat,nsig - integer(i_kind),dimension(n_ens) :: io_pe0 + integer(i_kind),dimension(n_ens) :: io_pe0,iretx real(r_single),allocatable,dimension(:,:,:,:) :: en_full,en_loc - real(r_kind),allocatable,dimension(:) :: sloc + real(r_single),allocatable,dimension(:,:,:) :: sloc integer(i_kind),allocatable,dimension(:) :: m_cvars2dw,m_cvars3dw integer(i_kind) :: m_cvars2d(nc2d),m_cvars3d(nc3d) type(sub2grid_info) :: grd3d @@ -234,10 +235,10 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & !!!!!!!!for example, n2d = nc3d*nsig + nc2d n2d=nc3d*grd_ens%nsig+nc2d - ias=1 ; iae=0 ; jas=1 ; jae=0 ; kas=1 ; kae=0 ; mas=1 ; mae=0 + ias=0 ; iae=0 ; jas=0 ; jae=0 ; kas=1 ; kae=0 ; mas=1 ; mae=0 if(mype==io_pe) then - iae=nlat - jae=nlon + iae=nlat+1 + jae=nlon+1 kae=n2d 
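The bounds above widen the full-grid read buffer on each IO task by one point on every side (ias=0, iae=nlat+1, jas=0, jae=nlon+1), and the subdomain bounds below are widened to match, so the genex exchange hands each task its halo points directly and the old update_halos_ pass through general_sub2grid/general_grid2sub can be dropped. A minimal standalone sketch of the halo fill this relies on is given here; the subroutine name, argument list, and the field array are illustrative only, but the wrap/copy pattern follows the fills added to the parallel_read_* routines further down: longitude wraps periodically, while the extra latitude rows copy the adjacent interior row.

! Illustrative only: one-point halo fill on an extended (0:nlat+1,0:nlon+1) grid,
! mirroring the en_full halo fill in the parallel_read_* routines.
subroutine fill_halo_sketch(nlat,nlon,field)
  use kinds, only: i_kind,r_single
  implicit none
  integer(i_kind),intent(in   ) :: nlat,nlon
  real(r_single), intent(inout) :: field(0:nlat+1,0:nlon+1)
  integer(i_kind) :: i,j

  do i=1,nlat
     field(i,0)     =field(i,nlon)   ! west halo column wraps to the easternmost column
     field(i,nlon+1)=field(i,1)      ! east halo column wraps to the westernmost column
  end do
  do j=0,nlon+1
     field(0,j)     =field(1,j)      ! southern halo row copies the adjacent interior row
     field(nlat+1,j)=field(nlat,j)   ! northern halo row copies the adjacent interior row
  end do
end subroutine fill_halo_sketch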
mas=n_io_pe_s ; mae=n_io_pe_em endif @@ -248,8 +249,13 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & ibe=ibs+grd_ens%lat1-1 jbs=grd_ens%jstart(mype+1) jbe=jbs+grd_ens%lon1-1 + jbs=jbs-1 + jbe=jbe+1 + ibs=ibs-1 + ibe=ibe+1 + + ibsm=ibs ; ibem=ibe ; jbsm=jbs ; jbem=jbe - ibsm=ibs-ip ; ibem=ibe+ip ; jbsm=jbs-ip ; jbem=jbe+ip kbs =1 ; kbe =n2d ; mbs =1 ; mbe =n_ens kbsm=kbs ; kbem=kbe ; mbsm=mbs ; mbem=mbe iaemz=max(iasm,iaem) ; jaemz=max(jasm,jaem) @@ -273,8 +279,6 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & m_cvars2dw=-999 m_cvars3dw=-999 - - !! read ensembles if ( mas == mae ) then @@ -305,45 +309,56 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, & allocate(en_full(1,1,1,1)) end if +! scatter to subdomains: + call mpi_allreduce(m_cvars2dw,m_cvars2d,nc2d,mpi_integer4,mpi_max,mpi_comm_world,ierror) call mpi_allreduce(m_cvars3dw,m_cvars3d,nc3d,mpi_integer4,mpi_max,mpi_comm_world,ierror) - deallocate(m_cvars2dw,m_cvars3dw) -! scatter to subdomains: + ! Check hydrometeors in control variables + icw=getindex(cvars3d,'cw') + iql=getindex(cvars3d,'ql') + iqi=getindex(cvars3d,'qi') + iqr=getindex(cvars3d,'qr') + iqs=getindex(cvars3d,'qs') + iqg=getindex(cvars3d,'qg') ! en_loc=zero + allocate(en_loc(ibsm:ibemz,jbsm:jbemz,kbsm:kbemz,mbsm:mbemz)) call genex(s_a2b,en_full,en_loc) deallocate(en_full) + if(ntindex == ntlevs_ens)call genex_destroy_info(s_a2b) -! call genex_destroy_info(s_a2b) ! check on actual routine name - - allocate(sloc(lat2in*lon2in*(nc2d+nc3d*nsig))) call create_grd23d_(grd3d,nc2d+nc3d*grd%nsig) - iret=0 + + allocate(sloc(grd3d%lat2,grd3d%lon2,grd3d%num_fields)) + iretx=0 +!$omp parallel do schedule(dynamic,1) private(n,k,j,i,sloc) do n=1,n_ens - ii=0 - do k=1,nc2d+nc3d*nsig - do j=jbsm,jbem - do i=ibsm,ibem - ii=ii+1 - sloc(ii)=en_loc(i,j,k,n) + do k=1,grd3d%num_fields + do j=1,grd3d%lon2 + do i=1,grd3d%lat2 + sloc(i,j,k)=en_loc(i+ibsm-1,j+jbsm-1,k,n) enddo enddo enddo - call move2bundle_(grd3d,sloc,atm_bundle(n),m_cvars2d,m_cvars3d,iret) + call move2bundle_(grd3d,sloc,atm_bundle(n),m_cvars2d,m_cvars3d,iretx(n)) enddo + iret=iretx(1) + do n=2,n_ens + iret=iret+iretx(n) + end do deallocate(en_loc,sloc) call general_sub2grid_destroy_info(grd3d,grd) end subroutine get_user_ens_gfs_fastread_ -subroutine move2bundle_(grd3d,sloc,atm_bundle,m_cvars2d,m_cvars3d,iret) +subroutine move2bundle_(grd3d,en_loc3,atm_bundle,m_cvars2d,m_cvars3d,iret) !$$$ subprogram documentation block ! . . . . @@ -373,58 +388,35 @@ subroutine move2bundle_(grd3d,sloc,atm_bundle,m_cvars2d,m_cvars3d,iret) ! !$$$ - use constants, only: zero,one,two,fv use general_sub2grid_mod, only: sub2grid_info use hybrid_ensemble_parameters, only: en_perts use gsi_bundlemod, only: gsi_bundle use gsi_bundlemod, only: gsi_bundlegetpointer use control_vectors, only: cvars2d,cvars3d,nc2d,nc3d - use mpeu_util, only: getindex implicit none ! Declare passed variables type(sub2grid_info), intent(in ) :: grd3d type(gsi_bundle), intent(inout) :: atm_bundle - real(r_kind), intent(inout) :: sloc(grd3d%lat2*grd3d%lon2*(nc2d+nc3d*grd3d%nsig)) + real(r_single), intent(inout) :: en_loc3(grd3d%lat2,grd3d%lon2,grd3d%num_fields) integer(i_kind), intent(in ) :: m_cvars2d(nc2d),m_cvars3d(nc3d) integer(i_kind), intent(inout) :: iret ! 
Declare internal variables character(len=*),parameter :: myname_='move2bundle_' - character(len=70) :: filename - integer(i_kind) :: ierr + integer(i_kind) :: ierr,i,j integer(i_kind) :: km1,m - integer(i_kind) :: icw,iql,iqi,iqr,iqs,iqg - real(r_kind),pointer,dimension(:,:) :: ps + real(r_single),pointer,dimension(:,:) :: ps !real(r_kind),pointer,dimension(:,:) :: sst - real(r_kind),dimension(grd3d%lat2,grd3d%lon2,nc2d+nc3d*grd3d%nsig)::en_loc3 - real(r_kind),pointer,dimension(:,:,:) :: u,v,tv,q,oz,cwmr - real(r_kind),pointer,dimension(:,:,:) :: qlmr,qimr,qrmr,qsmr,qgmr - real(r_kind),parameter :: r0_001 = 0.001_r_kind - - -!--- now update halo values of all variables using general_sub2grid - call update_halos_(grd3d,sloc,en_loc3) - - ! Check hydrometeors in control variables - icw=getindex(cvars3d,'cw') - iql=getindex(cvars3d,'ql') - iqi=getindex(cvars3d,'qi') - iqr=getindex(cvars3d,'qr') - iqs=getindex(cvars3d,'qs') - iqg=getindex(cvars3d,'qg') + real(r_single),pointer,dimension(:,:,:) :: u,v,tv,q,oz,cwmr + real(r_single),pointer,dimension(:,:,:) :: qlmr,qimr,qrmr,qsmr,qgmr ! atm_bundle to zero done earlier - call gsi_bundlegetpointer(atm_bundle,'ps',ps, ierr); iret = iret+ierr + call gsi_bundlegetpointer(atm_bundle,'ps',ps, ierr); iret = ierr !call gsi_bundlegetpointer(atm_bundle,'sst',sst, ierr); iret = iret+ierr - do m=1,nc2d -! convert ps from Pa to cb - if(trim(cvars2d(m))=='ps') ps=r0_001*en_loc3(:,:,m_cvars2d(m)) -! if(trim(cvars2d(m))=='sst') sst=en_loc3(:,:,m_cvars2d(m)) !no sst for now - enddo call gsi_bundlegetpointer(atm_bundle,'sf',u , ierr); iret = ierr + iret call gsi_bundlegetpointer(atm_bundle,'vp',v , ierr); iret = ierr + iret @@ -443,19 +435,25 @@ subroutine move2bundle_(grd3d,sloc,atm_bundle,m_cvars2d,m_cvars3d,iret) write(6,'(A)') trim(myname_) // ': For now, GFS requires all MetFields: ps,u,v,(sf,vp)tv,q,oz' write(6,'(A)') trim(myname_) // ': but some have not been found. Aborting ... ' write(6,'(A)') trim(myname_) // ': WARNING!' - write(6,'(3A,I5)') trim(myname_) // ': Trouble reading ensemble file : ', trim(filename), ', IRET = ', iret endif return endif + do m=1,nc2d +! convert ps from Pa to cb + if(trim(cvars2d(m))=='ps') ps=en_loc3(:,:,m_cvars2d(m)) +! if(trim(cvars2d(m))=='sst') sst=en_loc3(:,:,m_cvars2d(m)) !no sst for now + enddo + km1 = en_perts(1,1,1)%grid%km - 1 -!$omp parallel do schedule(dynamic,1) private(m) do m=1,nc3d if(trim(cvars3d(m))=='sf')then u = en_loc3(:,:,m_cvars3d(m):m_cvars3d(m)+km1) else if(trim(cvars3d(m))=='vp') then v = en_loc3(:,:,m_cvars3d(m):m_cvars3d(m)+km1) else if(trim(cvars3d(m))=='t') then +! Note tv here is sensible temperature. Converted to virtual temperature +! later. tv = en_loc3(:,:,m_cvars3d(m):m_cvars3d(m)+km1) else if(trim(cvars3d(m))=='q') then q = en_loc3(:,:,m_cvars3d(m):m_cvars3d(m)+km1) @@ -476,9 +474,6 @@ subroutine move2bundle_(grd3d,sloc,atm_bundle,m_cvars2d,m_cvars3d,iret) end if enddo -! convert t to virtual temperature - tv=tv*(one+fv*q) - return end subroutine move2bundle_ @@ -505,51 +500,6 @@ subroutine create_grd23d_(grd23d,nvert) end subroutine create_grd23d_ -subroutine update_halos_(grd,sloc,s) - - use general_sub2grid_mod, only: sub2grid_info,general_sub2grid,general_grid2sub - - implicit none - - ! Declare passed variables - type(sub2grid_info), intent(in ) :: grd - real(r_kind), intent( out) :: s(grd%lat2,grd%lon2,grd%num_fields) - real(r_kind), intent(inout) :: sloc(grd%lat2*grd%lon2*grd%num_fields) - - ! 
Declare local variables - integer(i_kind) inner_vars,lat2,lon2,nlat,nlon,nvert,kbegin_loc,kend_alloc - integer(i_kind) ii,i,j,k - real(r_kind),allocatable,dimension(:,:,:,:) :: work - - lat2=grd%lat2 - lon2=grd%lon2 - nlat=grd%nlat - nlon=grd%nlon - nvert=grd%num_fields - inner_vars=grd%inner_vars - kbegin_loc=grd%kbegin_loc - kend_alloc=grd%kend_alloc - - - - allocate(work(inner_vars,nlat,nlon,kbegin_loc:kend_alloc)) - call general_sub2grid(grd,sloc,work) - - call general_grid2sub(grd,work,sloc) - deallocate(work) - - ii=0 - do k=1,nvert - do j=1,lon2 - do i=1,lat2 - ii=ii+1 - s(i,j,k)=sloc(ii) - enddo - enddo - enddo - -end subroutine update_halos_ - subroutine ens_io_partition_(n_ens,io_pe,n_io_pe_s,n_io_pe_e,n_io_pe_em,io_pe0,i_ens) ! do computation on all processors, then assign final local processor @@ -605,7 +555,7 @@ subroutine ens_io_partition_(n_ens,io_pe,n_io_pe_s,n_io_pe_e,n_io_pe_em,io_pe0,i end subroutine ens_io_partition_ -subroutine parallel_read_nemsio_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig, & +subroutine parallel_read_nemsio_state_(en_full,m_cvars2dw,m_cvars3d,nlon,nlat,nsig, & ias,jas,mas, & iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz, & filename,init_head,filenamesfc) @@ -627,7 +577,7 @@ subroutine parallel_read_nemsio_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsi integer(i_kind), intent(in ) :: nlon,nlat,nsig integer(i_kind), intent(in ) :: ias,jas,mas integer(i_kind), intent(in ) :: iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz - integer(i_kind), intent(inout) :: m_cvars2d(nc2d),m_cvars3d(nc3d) + integer(i_kind), intent(inout) :: m_cvars2dw(nc2d),m_cvars3d(nc3d) real(r_single), intent(inout) :: en_full(iasm:iaemz,jasm:jaemz,kasm:kaemz,masm:maemz) character(len=*), intent(in ) :: filename character(len=*), optional, intent(in) :: filenamesfc @@ -816,15 +766,27 @@ subroutine parallel_read_nemsio_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsi m_cvars3d(k3)=kf+1 do k=1,nsig kf=kf+1 - jj=jas-1 + jj=jas do j=1,nlon jj=jj+1 - ii=ias-1 + ii=ias do i=1,nlat ii=ii+1 en_full(ii,jj,kf,mas)=temp3(i,j,k,k3) enddo enddo + ii=ias + do i=1,nlat + ii=ii+1 + en_full(ii,jasm,kf,mas)=en_full(ii,jaem-1,kf,mas) + en_full(ii,jaem,kf,mas)=en_full(ii,jasm+1,kf,mas) + enddo + jj=jas-1 + do j=jasm,jaem + jj=jj+1 + en_full(iasm,jj,kf,mas)=en_full(iasm+1,jj,kf,mas) + en_full(iaem,jj,kf,mas)=en_full(iaem-1,jj,kf,mas) + end do enddo enddo deallocate(temp3) @@ -853,24 +815,36 @@ subroutine parallel_read_nemsio_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsi ! move temp2 to en_full do k2=1,nc2d - m_cvars2d(k2)=kf+1 + m_cvars2dw(k2)=kf+1 kf=kf+1 - jj=jas-1 + jj=jas do j=1,nlon jj=jj+1 - ii=ias-1 + ii=ias do i=1,nlat ii=ii+1 en_full(ii,jj,kf,mas)=temp2(i,j,k2) enddo enddo + ii=ias + do i=1,nlat + ii=ii+1 + en_full(ii,jasm,kf,mas)=en_full(ii,jaem-1,kf,mas) + en_full(ii,jaem,kf,mas)=en_full(ii,jasm+1,kf,mas) + enddo + jj=jas-1 + do j=jasm,jaem + jj=jj+1 + en_full(iasm,jj,kf,mas)=en_full(iasm+1,jj,kf,mas) + en_full(iaem,jj,kf,mas)=en_full(iaem-1,jj,kf,mas) + end do enddo deallocate(temp2) end subroutine parallel_read_nemsio_state_ -subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig, & +subroutine parallel_read_gfsnc_state_(en_full,m_cvars2dw,m_cvars3d,nlon,nlat,nsig, & ias,jas,mas, & iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz, & filename) @@ -884,7 +858,7 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig ! 
!$$$ - use constants, only: r60,r3600,zero,one,half,deg2rad + use constants, only: r60,r3600,zero,one,half,deg2rad,zero_single use control_vectors, only: cvars2d,cvars3d,nc2d,nc3d use general_sub2grid_mod, only: sub2grid_info use module_ncio, only: Dataset, Variable, Dimension, open_dataset,& @@ -898,7 +872,7 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig integer(i_kind), intent(in ) :: nlon,nlat,nsig integer(i_kind), intent(in ) :: ias,jas,mas integer(i_kind), intent(in ) :: iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz - integer(i_kind), intent(inout) :: m_cvars2d(nc2d),m_cvars3d(nc3d) + integer(i_kind), intent(inout) :: m_cvars2dw(nc2d),m_cvars3d(nc3d) real(r_single), intent(inout) :: en_full(iasm:iaemz,jasm:jaemz,kasm:kaemz,masm:maemz) character(len=*), intent(in ) :: filename @@ -1021,15 +995,27 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig m_cvars3d(k3)=kf+1 do k=1,nsig kf=kf+1 - jj=jas-1 + jj=jas do j=1,nlon jj=jj+1 - ii=ias-1 + ii=ias do i=1,nlat ii=ii+1 en_full(ii,jj,kf,mas)=temp3(i,j,k,k3) enddo enddo + ii=ias + do i=1,nlat + ii=ii+1 + en_full(ii,jasm,kf,mas)=en_full(ii,jaem-1,kf,mas) + en_full(ii,jaem,kf,mas)=en_full(ii,jasm+1,kf,mas) + enddo + jj=jas-1 + do j=jasm,jaem + jj=jj+1 + en_full(iasm,jj,kf,mas)=en_full(iasm+1,jj,kf,mas) + en_full(iaem,jj,kf,mas)=en_full(iaem-1,jj,kf,mas) + end do enddo enddo @@ -1042,24 +1028,34 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig call read_vardata(atmges, 'pressfc', rwork2d) call move1_(rwork2d,temp2,nlon,nlat) call fillpoles_ss_(temp2,nlon,nlat) - else - temp2=zero - endif ! move temp2 to en_full - kf=kf+1 - m_cvars2d(k2)=kf - jj=jas-1 - do j=1,nlon - jj=jj+1 - ii=ias-1 + kf=kf+1 + m_cvars2dw(k2)=kf + jj=jas + do j=1,nlon + jj=jj+1 + ii=ias + do i=1,nlat + ii=ii+1 + en_full(ii,jj,kf,mas)=temp2(i,j) + enddo + enddo + ii=ias do i=1,nlat ii=ii+1 - en_full(ii,jj,kf,mas)=temp2(i,j) + en_full(ii,jasm,kf,mas)=en_full(ii,jaem-1,kf,mas) + en_full(ii,jaem,kf,mas)=en_full(ii,jasm+1,kf,mas) enddo - enddo + jj=jas-1 + do j=jasm,jaem + jj=jj+1 + en_full(iasm,jj,kf,mas)=en_full(iasm+1,jj,kf,mas) + en_full(iaem,jj,kf,mas)=en_full(iaem-1,jj,kf,mas) + end do + end if enddo -! call close_dataset(atmges) + call close_dataset(atmges) deallocate(rwork2d) deallocate(temp2) @@ -1158,7 +1154,7 @@ subroutine fillpoles_sv_(tempu,tempv,nlon,nlat,clons,slons) real(r_single), intent(inout) :: tempu(nlat,nlon),tempv(nlat,nlon) real(r_kind), intent(in ) :: clons(nlon),slons(nlon) - integer(i_kind) i + integer(i_kind) i,nlatm real(r_kind) polnu,polnv,polsu,polsv ! 
Compute mean along southern and northern latitudes @@ -1166,11 +1162,12 @@ subroutine fillpoles_sv_(tempu,tempv,nlon,nlat,clons,slons) polnv=zero polsu=zero polsv=zero + nlatm=nlat-1 do i=1,nlon - polnu=polnu+tempu(nlat-1,i)*clons(i)-tempv(nlat-1,i)*slons(i) - polnv=polnv+tempu(nlat-1,i)*slons(i)+tempv(nlat-1,i)*clons(i) - polsu=polsu+tempu(2,i )*clons(i)+tempv(2,i )*slons(i) - polsv=polsv+tempu(2,i )*slons(i)-tempv(2,i )*clons(i) + polnu=polnu+tempu(nlatm,i)*clons(i)-tempv(nlatm,i)*slons(i) + polnv=polnv+tempu(nlatm,i)*slons(i)+tempv(nlatm,i)*clons(i) + polsu=polsu+tempu(2,i )*clons(i)+tempv(2,i )*slons(i) + polsv=polsv+tempu(2,i )*slons(i)-tempv(2,i )*clons(i) end do polnu=polnu/float(nlon) polnv=polnv/float(nlon) diff --git a/src/gsi/general_specmod.f90 b/src/gsi/general_specmod.f90 index c90187bf70..439e26e431 100644 --- a/src/gsi/general_specmod.f90 +++ b/src/gsi/general_specmod.f90 @@ -64,7 +64,6 @@ module general_specmod ! set subroutines to public public :: general_init_spec_vars public :: general_destroy_spec_vars - public :: general_spec_multwgt ! set passed variables to public public :: spec_vars public :: spec_cut @@ -307,36 +306,6 @@ subroutine general_init_spec_vars(sp,jcap,jcap_test,nlat_a,nlon_a,eqspace) return end subroutine general_init_spec_vars - subroutine general_spec_multwgt(sp,spcwrk,spcwgt) -! 2017-03-30 J. Kay, X. Wang - add general_spec_multwgt for scale-dependent -! weighting of mixed resolution ensemble, -! POC: xuguang.wang@ou.edu -! - implicit none - type(spec_vars),intent(in) :: sp - real(r_kind),dimension(sp%nc),intent(inout) :: spcwrk - real(r_kind),dimension(0:sp%jcap),intent(in) :: spcwgt - - integer(i_kind) l,jmax,ks,n - -!! Code borrowed from spvar in splib - jmax=sp%jcap - - do n=0,jmax - ks=2*n - spcwrk(ks+1)=spcwrk(ks+1)*spcwgt(n) - end do - do n=0,jmax - do l=MAX(1,n-jmax),MIN(n,jmax) - ks=l*(2*jmax+(-1)*(l-1))+2*n - spcwrk(ks+1) = spcwrk(ks+1)*spcwgt(n) - spcwrk(ks+2) = spcwrk(ks+2)*spcwgt(n) - end do - end do - - return - end subroutine general_spec_multwgt - subroutine general_destroy_spec_vars(sp) !$$$ subprogram documentation block ! . . . . diff --git a/src/gsi/general_spectral_transforms.f90 b/src/gsi/general_spectral_transforms.f90 index d4f0959489..541923e450 100644 --- a/src/gsi/general_spectral_transforms.f90 +++ b/src/gsi/general_spectral_transforms.f90 @@ -368,8 +368,8 @@ subroutine sfilter(grd,sp,filter,grid) call general_sptez_s(sp,spec_work,work,-1) - gnlon=float(grd%nlon) -! gnlon=real(grd%nlon,r_kind) +! 
gnlon=float(grd%nlon) + gnlon=real(grd%nlon,r_kind) do i=1,sp%nc spec_work(i)=spec_work(i)*gnlon end do diff --git a/src/gsi/general_sub2grid_mod.f90 b/src/gsi/general_sub2grid_mod.f90 index f0548643bb..bacea4688b 100644 --- a/src/gsi/general_sub2grid_mod.f90 +++ b/src/gsi/general_sub2grid_mod.f90 @@ -87,6 +87,7 @@ module general_sub2grid_mod interface general_sub2grid module procedure general_sub2grid_r_single_rank11 module procedure general_sub2grid_r_single_rank14 + module procedure general_sub2grid_r_single_rank13 module procedure general_sub2grid_r_single_rank4 module procedure general_sub2grid_r_double_rank11 module procedure general_sub2grid_r_double_rank14 @@ -97,6 +98,7 @@ module general_sub2grid_mod module procedure general_grid2sub_r_single_rank11 module procedure general_grid2sub_r_single_rank41 module procedure general_grid2sub_r_single_rank4 + module procedure general_grid2sub_r_single_rank31 module procedure general_grid2sub_r_double_rank11 module procedure general_grid2sub_r_double_rank41 module procedure general_grid2sub_r_double_rank4 @@ -1019,6 +1021,93 @@ subroutine general_sub2grid_r_single_rank14(s,sub_vars,grid_vars) end subroutine general_sub2grid_r_single_rank14 + subroutine general_sub2grid_r_single_rank13(s,sub_vars,grid_vars) +!$$$ subprogram documentation block +! . . . . +! subprogram: general_sub2grid_r_single_rank4 convert from subdomains to full horizontal grid +! prgmmr: parrish org: np22 date: 2010-02-11 +! +! abstract: generalized version of sub2grid--uses only gsi module kinds. +! All information needed is contained in the structure variable +! "s", instead of various modules. This allows +! for easy adaptation for any collection/ordering of variables +! defined on subdomains, which need to be made available on +! full horizontal grid for horizontal operations. +! The structure variable is specified by subroutine general_sub2grid_setup. +! This version works with single precision (4-byte) real variables. +! Input sub_vars, the desired arrays on horizontal subdomains, has one +! halo row, for now, which is filled with zero, since for ensemble use, +! there is no need for a halo, but is easiest for now to keep it. +! A later version will have variable number of halo rows, filled with proper values. +! +! program history log: +! 2010-02-11 parrish, initial documentation +! +! input argument list: +! s - structure variable, contains all necessary information for +! moving this set of subdomain variables sub_vars to +! the corresponding set of full horizontal grid variables. +! sub_vars - input grid values in vertical subdomain mode (contains one halo row) +! +! output argument list: +! grid_vars - output grid values in horizontal slab mode. +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! +!$$$ + use mpimod, only: mpi_comm_world,mpi_real4 + implicit none + + type(sub2grid_info),intent(in ) :: s + real(r_single), intent(in ) :: sub_vars(s%lat2*s%lon2*s%num_fields) + real(r_single), intent( out) :: grid_vars(s%nlat,s%nlon,s%kbegin_loc:s%kend_alloc) + + real(r_single) :: sub_vars_r4(s%lat2,s%lon2,s%num_fields) + real(r_single) :: sub_vars0(s%lat1,s%lon1,s%num_fields) + real(r_single) :: work(s%itotsub*(s%kend_alloc-s%kbegin_loc+1)) + integer(i_kind) iloc,iskip,i,i0,j,j0,k,n,k_in,ilat,jlon,ierror,ioffset + + sub_vars_r4 = reshape(sub_vars,(/s%lat2,s%lon2,s%num_fields/)) +! 
remove halo row +!$omp parallel do schedule(dynamic,1) private(k,j,j0,i0,i) + do k=1,s%num_fields + do j=2,s%lon2-1 + j0=j-1 + do i=2,s%lat2-1 + i0=i-1 + sub_vars0(i0,j0,k)=sub_vars_r4(i,j,k) + end do + end do + end do + + call mpi_alltoallv(sub_vars0,s%recvcounts,s%rdispls,mpi_real4, & + work,s%sendcounts,s%sdispls,mpi_real4,mpi_comm_world,ierror) + + + k_in=s%kend_loc-s%kbegin_loc+1 + +! Load grid_vars array in desired order +!$omp parallel do schedule(dynamic,1) private(k,iskip,iloc,n,i,ilat,jlon,ioffset) + do k=s%kbegin_loc,s%kend_loc + iskip=0 + iloc=0 + do n=1,s%npe + if (n/=1) then + iskip=iskip+s%ijn(n-1)*k_in + end if + ioffset=iskip+(k-s%kbegin_loc)*s%ijn(n) + do i=1,s%ijn(n) + iloc=iloc+1 + ilat=s%ltosi(iloc) + jlon=s%ltosj(iloc) + grid_vars(ilat,jlon,k)=work(i + ioffset) + end do + end do + end do + + end subroutine general_sub2grid_r_single_rank13 subroutine general_sub2grid_r_single_rank4(s,sub_vars,grid_vars) !$$$ subprogram documentation block ! . . . . @@ -1199,6 +1288,84 @@ subroutine general_grid2sub_r_single_rank41(s,grid_vars,sub_vars) end subroutine general_grid2sub_r_single_rank41 + subroutine general_grid2sub_r_single_rank31(s,grid_vars,sub_vars) +!$$$ subprogram documentation block +! . . . . +! subprogram: general_sub2grid convert from subdomains to full horizontal grid +! prgmmr: parrish org: np22 date: 2010-02-11 +! +! abstract: generalized version of grid2sub--uses only gsi module kinds. +! All information needed is contained in the structure variable +! "s", instead of various modules. This allows +! for easy adaptation for any collection/ordering of variables +! defined on subdomains, which need to be made available on +! full horizontal grid for horizontal operations. +! The structure variable is specified by subroutine general_sub2grid_setup. +! This version works with single precision (4-byte) real variables. +! Output sub_vars, the desired arrays on horizontal subdomains, has one +! halo row, for now, which is filled with zero, since for ensemble use, +! there is no need for a halo, but is easiest for now to keep it. +! A later version will have variable number of halo rows, filled with proper values. +! +! program history log: +! 2010-02-11 parrish, initial documentation +! 2010-03-02 parrish - remove setting halo to zero in output +! 2014-12-03 derber - make similar optimization changes already in code for +! double precision. +! +! input argument list: +! s - structure variable, contains all necessary information for +! moving this set of subdomain variables sub_vars to +! the corresponding set of full horizontal grid variables. +! grid_vars - input grid values in horizontal slab mode. +! +! output argument list: +! sub_vars - output grid values in vertical subdomain mode +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! +!$$$ + use constants, only: zero + use mpimod, only: mpi_comm_world,mpi_real4 + implicit none + + type(sub2grid_info),intent(in ) :: s + real(r_single), intent(in ) :: grid_vars(s%nlat,s%nlon,s%kbegin_loc:s%kend_alloc) + real(r_single), intent( out) :: sub_vars(s%lat2*s%lon2*s%num_fields) + + real(r_single) :: sub_vars_r4(s%lat2,s%lon2,s%num_fields) + real(r_single) :: temp(s%itotsub*(s%kend_loc-s%kbegin_loc+1)) + integer(i_kind) iloc,i,ii,k,n,ilat,jlon,ierror,icount + integer(i_kind),dimension(s%npe) ::iskip + +! 
reorganize for eventual distribution to local domains + iskip(1)=0 + do n=2,s%npe + iskip(n)=iskip(n-1)+s%ijn_s(n-1)*(s%kend_loc-s%kbegin_loc+1) + end do +!$omp parallel do schedule(dynamic,1) private(n,k,i,jlon,ii,ilat,iloc,icount) + do k=s%kbegin_loc,s%kend_loc + icount=0 + do n=1,s%npe + iloc=iskip(n)+(k-s%kbegin_loc)*s%ijn_s(n) + do i=1,s%ijn_s(n) + iloc=iloc+1 + icount=icount+1 + ilat=s%ltosi_s(icount) + jlon=s%ltosj_s(icount) + temp(iloc)=grid_vars(ilat,jlon,k) + end do + end do + end do + + + call mpi_alltoallv(temp,s%sendcounts_s,s%sdispls_s,mpi_real4, & + sub_vars_r4,s%recvcounts_s,s%rdispls_s,mpi_real4,mpi_comm_world,ierror) + + sub_vars = reshape(sub_vars_r4,(/s%lat2*s%lon2*s%num_fields/)) + end subroutine general_grid2sub_r_single_rank31 subroutine general_grid2sub_r_single_rank4(s,grid_vars,sub_vars) !$$$ subprogram documentation block ! . . . . diff --git a/src/gsi/get_gefs_ensperts_dualres.f90 b/src/gsi/get_gefs_ensperts_dualres.f90 index e244fa9f53..bb5ee374af 100644 --- a/src/gsi/get_gefs_ensperts_dualres.f90 +++ b/src/gsi/get_gefs_ensperts_dualres.f90 @@ -26,7 +26,7 @@ subroutine get_gefs_ensperts_dualres ! ! get_gefs_ensperts_dualres.f90(182): error #6460: This is not a field name that ! is defined in the encompassing structure. [LAT2] -! call genqsat(qs,tsen,prsl,grd_ens%lat2,grd_ens%lon2,grd_ens%nsig,ice,iderivative) +! call genqsat2(qs,tsen,prsl,grd_ens%lat2,grd_ens%lon2,grd_ens%nsig,ice) ! 2014-11-30 todling - partially generalized to handle any control vector ! (GFS hook needs further attention) ! - also, take SST from members of ensemble @@ -69,7 +69,7 @@ subroutine get_gefs_ensperts_dualres use gsi_enscouplermod, only: gsi_enscoupler_create_sub2grid_info use gsi_enscouplermod, only: gsi_enscoupler_destroy_sub2grid_info use general_sub2grid_mod, only: sub2grid_info,general_sub2grid_create_info,general_sub2grid_destroy_info - use hybrid_ensemble_parameters, only: nsclgrp,sp_ens,spc_multwgt,global_spectral_filter_sd + use hybrid_ensemble_parameters, only: nsclgrp,sp_ens,global_spectral_filter_sd implicit none real(r_kind),pointer,dimension(:,:) :: ps @@ -78,24 +78,23 @@ subroutine get_gefs_ensperts_dualres ! real(r_kind),dimension(grd_ens%nlat,grd_ens%nlon):: sst_full,dum real(r_kind),pointer,dimension(:,:,:):: p3 real(r_kind),pointer,dimension(:,:):: x2 - type(gsi_bundle),allocatable,dimension(:) :: en_read + type(gsi_bundle),allocatable,dimension(:) :: en_real8 type(gsi_bundle):: en_bar - type(gsi_bundle) :: en_pertstmp1 - type(gsi_bundle) :: en_pertstmp2 ! type(gsi_grid) :: grid_ens - real(r_kind) bar_norm,sig_norm,kapr,kap1 + real(r_kind) bar_norm,sig_norm ! real(r_kind),allocatable,dimension(:,:):: z,sst2 - real(r_kind),allocatable,dimension(:,:,:) :: tsen,prsl,pri,qs + real(r_kind),allocatable,dimension(:,:,:) :: tsen,prsl ! integer(i_kind),dimension(grd_ens%nlat,grd_ens%nlon):: idum - integer(i_kind) istatus,iret,i,ic3,j,k,n,iderivative,im,jm,km,m,ipic + integer(i_kind) istatus,iret,i,ic3,j,k,n,im,jm,km,m,ipic ! integer(i_kind) mm1 integer(i_kind) ipc3d(nc3d),ipc2d(nc2d) integer(i_kind) ier ! integer(i_kind) il,jl logical ice,hydrometeor type(sub2grid_info) :: grd_tmp - integer(i_kind) :: ig + real(r_kind),parameter :: r0_001 = 0.001_r_kind + ! Create perturbations grid and get variable names from perturbations if(en_perts(1,1,1)%grid%im/=grd_ens%lat2.or. & @@ -135,25 +134,15 @@ subroutine get_gefs_ensperts_dualres if ( istatus /= 0 ) & call die('get_gefs_ensperts_dualres',': trouble creating en_bar bundle, istatus =',istatus) -! 
Allocate bundle used for temporary usage - if( nsclgrp > 1 .and. global_spectral_filter_sd )then - call gsi_bundlecreate(en_pertstmp1,en_perts(1,1,1)%grid,'aux-ens-read',istatus,names2d=cvars2d,names3d=cvars3d) - call gsi_bundlecreate(en_pertstmp2,en_perts(1,1,1)%grid,'aux-ens-read',istatus,names2d=cvars2d,names3d=cvars3d) - if(istatus/=0) then - write(6,*)' get_gefs_ensperts_dualres: trouble creating en_read like tempbundle' - call stop2(999) - endif - end if - - ! Allocate bundle used for reading members - allocate(en_read(n_ens)) + ! Allocate bundle used for real*8 version of members + allocate(en_real8(n_ens)) do n=1,n_ens - call gsi_bundlecreate(en_read(n),en_perts(1,1,1)%grid,'ensemble member',istatus,names2d=cvars2d,names3d=cvars3d) + call gsi_bundlecreate(en_real8(n),en_perts(1,1,1)%grid,'ensemble member',istatus,names2d=cvars2d,names3d=cvars3d) if ( istatus /= 0 ) & - call die('get_gefs_ensperts_dualres',': trouble creating en_read bundle, istatus =',istatus) - en_read(n) = zero + call die('get_gefs_ensperts_dualres',': trouble creating en_real8 bundle, istatus =',istatus) end do + ! allocate(z(im,jm)) ! allocate(sst2(im,jm)) @@ -162,7 +151,7 @@ subroutine get_gefs_ensperts_dualres ntlevs_ens_loop: do m=1,ntlevs_ens - call gsi_enscoupler_get_user_Nens(grd_tmp,n_ens,m,en_read,iret) + call gsi_enscoupler_get_user_Nens(grd_tmp,n_ens,m,en_perts(:,1,m),iret) ! Check read return code. Revert to static B if read error detected if ( iret /= 0 ) then @@ -175,67 +164,52 @@ subroutine get_gefs_ensperts_dualres endif en_bar%values=zero + allocate(tsen(im,jm,km)) if (.not.q_hyb_ens) then !use RH - allocate(pri(im,jm,km+1)) - allocate(prsl(im,jm,km),tsen(im,jm,km)) - allocate(qs(im,jm,km)) + allocate(prsl(im,jm,km)) end if do n=1,n_ens + do i=1,nelen + en_real8(n)%values(i)=real(en_perts(n,1,m)%valuesr4(i),r_kind) + end do - call gsi_bundlegetpointer(en_read(n),'q' ,q ,ier);istatus=istatus+ier + call gsi_bundlegetpointer(en_real8(n),'q' ,q ,ier);istatus=istatus+ier + call gsi_bundlegetpointer(en_real8(n),'t' ,tv,ier);istatus=istatus+ier + call gsi_bundlegetpointer(en_real8(n),'ps',ps,ier);istatus=ier +! Convert ps to correct units + do j=1,jm + do i=1,im + ps(i,j)=r0_001*ps(i,j) + end do + end do +! Convert to real from single and convert tv to virtual temperature do k=1,km do j=1,jm do i=1,im +! Use following 3 lines for results identical to previous version +! tv(i,j,k)= tv(i,j,k)*(one+fv*q(i,j,k)) +! q(i,j,k)=max(q(i,j,k),zero) +! tsen(i,j,k)=tv(i,j,k)/(one+fv*q(i,j,k)) +! Remove following 3 lines for results identical to previous version q(i,j,k)=max(q(i,j,k),zero) + tsen(i,j,k)=tv(i,j,k) + tv(i,j,k)= tsen(i,j,k)*(one+fv*q(i,j,k)) end do end do end do if (.not.q_hyb_ens) then !use RH - call gsi_bundlegetpointer(en_read(n),'ps',ps,ier);istatus=ier - call gsi_bundlegetpointer(en_read(n),'t' ,tv,ier);istatus=istatus+ier + ! Compute RH ! Get 3d pressure field now on interfaces - call general_getprs_glb(ps,tv,pri) -! 
Get sensible temperature and 3d layer pressure - if (idsl5 /= 2) then - kap1=rd_over_cp+one - kapr=one/rd_over_cp -!$omp parallel do schedule(dynamic,1) private(k,j,i) - do k=1,km - do j=1,jm - do i=1,im - prsl(i,j,k)=((pri(i,j,k)**kap1-pri(i,j,k+1)**kap1)/& - (kap1*(pri(i,j,k)-pri(i,j,k+1))))**kapr - tsen(i,j,k)= tv(i,j,k)/(one+fv*q(i,j,k)) - end do - end do - end do - else -!$omp parallel do schedule(dynamic,1) private(k,j,i) - do k=1,km - do j=1,jm - do i=1,im - prsl(i,j,k)=(pri(i,j,k)+pri(i,j,k+1))*half - tsen(i,j,k)= tv(i,j,k)/(one+fv*q(i,j,k)) - end do - end do - end do - end if + call general_getprs_glb(ps,tv,prsl) ice=.true. - iderivative=0 - call genqsat(qs,tsen,prsl,im,jm,km,ice,iderivative) - do k=1,km - do j=1,jm - do i=1,im - q(i,j,k)=q(i,j,k)/qs(i,j,k) - end do - end do - end do + call genqsat2(q,tsen,prsl,ice) + end if -!$omp parallel do schedule(dynamic,1) private(i,k,j,ic3,hydrometeor,istatus,p3) +! !$omp parallel do schedule(dynamic,1) private(i,k,j,ic3,hydrometeor,istatus,p3) do ic3=1,nc3d hydrometeor = trim(cvars3d(ic3))=='cw' .or. trim(cvars3d(ic3))=='ql' .or. & @@ -246,7 +220,7 @@ subroutine get_gefs_ensperts_dualres if ( hydrometeor ) then - call gsi_bundlegetpointer(en_read(n),trim(cvars3d(ic3)),p3,istatus) + call gsi_bundlegetpointer(en_real8(n),trim(cvars3d(ic3)),p3,istatus) if(istatus/=0) then write(6,*)' error retrieving pointer to ',trim(cvars3d(ic3)),' from read in member ',n,m call stop2(999) @@ -260,7 +234,7 @@ subroutine get_gefs_ensperts_dualres end do else if ( trim(cvars3d(ic3)) == 'oz' .and. oz_univ_static ) then - call gsi_bundlegetpointer(en_read(n),trim(cvars3d(ic3)),p3,istatus) + call gsi_bundlegetpointer(en_real8(n),trim(cvars3d(ic3)),p3,istatus) if(istatus/=0) then write(6,*)' error retrieving pointer to ',trim(cvars3d(ic3)),' from read in member ',n,m call stop2(999) @@ -270,7 +244,7 @@ subroutine get_gefs_ensperts_dualres end do !c3d do i=1,nelen - en_bar%values(i)=en_bar%values(i)+en_read(n)%values(i)*bar_norm + en_bar%values(i)=en_bar%values(i)+en_real8(n)%values(i)*bar_norm end do @@ -282,10 +256,9 @@ subroutine get_gefs_ensperts_dualres end do ! end do over ensembles if (.not.q_hyb_ens) then !use RH - deallocate(pri) - deallocate(tsen,prsl) - deallocate(qs) + deallocate(prsl) end if + deallocate(tsen) ! Before converting to perturbations, get ensemble spread !!! it is not clear of the next statement is thread/$omp safe. @@ -309,7 +282,7 @@ subroutine get_gefs_ensperts_dualres !$omp parallel do schedule(dynamic,1) private(n,i,ic3,ipic,k,j) do n=1,n_ens do i=1,nelen - en_perts(n,1,m)%valuesr4(i)=en_read(n)%values(i)-en_bar%values(i) + en_perts(n,1,m)%valuesr4(i)=en_real8(n)%values(i)-en_bar%values(i) end do if(.not. q_hyb_ens) then do ic3=1,nc3d @@ -318,8 +291,8 @@ subroutine get_gefs_ensperts_dualres do k=1,km do j=1,jm do i=1,im - en_perts(n,1,m)%r3(ipic)%qr4(i,j,k) = min(en_perts(n,1,m)%r3(ipic)%qr4(i,j,k),limqens) - en_perts(n,1,m)%r3(ipic)%qr4(i,j,k) = max(en_perts(n,1,m)%r3(ipic)%qr4(i,j,k),-limqens) + en_perts(n,1,m)%r3(ipic)%qr4(i,j,k) = & + max(min(en_perts(n,1,m)%r3(ipic)%qr4(i,j,k),limqens),-limqens) end do end do end do @@ -330,33 +303,22 @@ subroutine get_gefs_ensperts_dualres en_perts(n,1,m)%valuesr4(i)=en_perts(n,1,m)%valuesr4(i)*sig_norm end do end do + if(nsclgrp > 1 .and. global_spectral_filter_sd) then + call apply_scaledepwgts(m,grd_ens,sp_ens) + end if end do ntlevs_ens_loop !end do over bins - call gsi_bundledestroy(en_bar,istatus) - if(nsclgrp > 1 .and. 
global_spectral_filter_sd) then - do m=1,ntlevs_ens - do n=1,n_ens - en_pertstmp1%values=en_perts(n,1,m)%valuesr4 - do ig=1,nsclgrp - call apply_scaledepwgts(grd_ens,sp_ens,en_pertstmp1,spc_multwgt(:,ig),en_pertstmp2) - en_perts(n,ig,m)%valuesr4=en_pertstmp2%values - enddo - enddo - enddo - endif - do n=n_ens,1,-1 - call gsi_bundledestroy(en_read(n),istatus) + call gsi_bundledestroy(en_real8(n),istatus) if ( istatus /= 0 ) & - call die('get_gefs_ensperts_dualres',': trouble destroying en_read bundle, istatus = ', istatus) + call die('get_gefs_ensperts_dualres',': trouble destroying en_real8 bundle, istatus = ', istatus) end do - deallocate(en_read) - if(nsclgrp > 1 .and. global_spectral_filter_sd) then - call gsi_bundledestroy(en_pertstmp1,istatus) - call gsi_bundledestroy(en_pertstmp2,istatus) - if ( istatus /= 0 ) & - call die('get_gefs_ensperts_dualres',': trouble destroying en_pertstmp2 bundle, istatus = ', istatus) - end if + deallocate(en_real8) + + call gsi_bundledestroy(en_bar,istatus) + + if(nsclgrp > 1 .and. global_spectral_filter_sd) call destroy_mult_spc_wgts + call gsi_enscoupler_destroy_sub2grid_info(grd_tmp) ! mm1=mype+1 @@ -675,7 +637,7 @@ subroutine write_spread_dualres(ibin,bundle) return end subroutine write_spread_dualres -subroutine general_getprs_glb(ps,tv,prs) +subroutine general_getprs_glb(ps,tv,prsl) ! subprogram: getprs get 3d pressure or 3d pressure deriv ! prgmmr: kleist org: np20 date: 2005-09-29 ! @@ -707,107 +669,197 @@ subroutine general_getprs_glb(ps,tv,prs) use kinds,only: r_kind,i_kind use constants,only: zero,half,one_tenth,rd_over_cp,one - use gridmod,only: nsig,ak5,bk5,ck5,tref5,idvc5 - use gridmod,only: wrf_nmm_regional,nems_nmmb_regional,eta1_ll,eta2_ll,pdtop_ll,pt_ll,& - regional,wrf_mass_regional,twodvar_regional,fv3_regional + use gridmod,only: nsig,ak5,bk5,ck5,tref5,idvc5,idsl5 use hybrid_ensemble_parameters, only: grd_ens implicit none ! Declare passed variables - real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2) ,intent(in ) :: ps - real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,nsig) ,intent(in ) :: tv - real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,nsig+1),intent( out) :: prs + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2) ,intent(in ) :: ps + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,nsig),intent(in ) :: tv + real(r_kind),dimension(grd_ens%lat2,grd_ens%lon2,nsig),intent( out) :: prsl ! Declare local variables - real(r_kind) kapr,trk + real(r_kind) kapr,trk,kap1 + real(r_kind),dimension(grd_ens%lat2,nsig+1) :: prs integer(i_kind) i,j,k,k2 ! ,it -! Declare local parameter - real(r_kind),parameter:: ten = 10.0_r_kind - if (regional) then - if(wrf_nmm_regional.or.nems_nmmb_regional) then - do k=1,nsig+1 - do j=1,grd_ens%lon2 + k2=nsig+1 + kap1=rd_over_cp+one + kapr=one/rd_over_cp +!$omp parallel do schedule(dynamic,1) private(k,j,i,trk,prs) + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + prs(i,1)=ps(i,j) + prs(i,k2)=zero + end do + if (idvc5 /= 3) then + do k=2,nsig do i=1,grd_ens%lat2 - prs(i,j,k)=one_tenth* & - (eta1_ll(k)*pdtop_ll + & - eta2_ll(k)*(ten*ps(i,j)-pdtop_ll-pt_ll) + & - pt_ll) + prs(i,k)=ak5(k)+bk5(k)*ps(i,j) end do end do - end do - elseif (fv3_regional) then - do k=1,nsig+1 - do j=1,grd_ens%lon2 + else + do k=1,nsig do i=1,grd_ens%lat2 - prs(i,j,k)=eta1_ll(k)+ eta2_ll(k)*ps(i,j) + trk=(half*(tv(i,j,k-1)+tv(i,j,k))/tref5(k))**kapr + prs(i,k)=ak5(k)+(bk5(k)*ps(i,j))+(ck5(k)*trk) end do end do - end do - - elseif (twodvar_regional) then - do k=1,nsig+1 - do j=1,grd_ens%lon2 + end if +! 
Get sensible temperature and 3d layer pressure + if (idsl5 /= 2) then + do k=1,nsig do i=1,grd_ens%lat2 - prs(i,j,k)=one_tenth*(eta1_ll(k)*(ten*ps(i,j)-pt_ll) + pt_ll) + prsl(i,j,k)=((prs(i,k)**kap1-prs(i,k+1)**kap1)/& + (kap1*(prs(i,k)-prs(i,k+1))))**kapr end do end do - end do - elseif (wrf_mass_regional) then - do k=1,nsig+1 - do j=1,grd_ens%lon2 + else + do k=1,nsig do i=1,grd_ens%lat2 - prs(i,j,k)=one_tenth*(eta1_ll(k)*(ten*ps(i,j)-pt_ll) + & - eta2_ll(k) + pt_ll) + prsl(i,j,k)=(prs(i,k)+prs(i,k+1))*half end do end do - end do - endif - else - if (idvc5 /= 3) then -!$omp parallel do schedule(dynamic,1) private(k,j,i) - do k=1,nsig - if(k == 1)then - k2=nsig+1 - do j=1,grd_ens%lon2 - do i=1,grd_ens%lat2 - prs(i,j,k)=ps(i,j) - prs(i,j,k2)=zero - end do - end do - else - do j=1,grd_ens%lon2 - do i=1,grd_ens%lat2 - prs(i,j,k)=ak5(k)+bk5(k)*ps(i,j) - end do - end do + end if + end do + + return +end subroutine general_getprs_glb +subroutine genqsat2(q,tsen,prsl,ice) +!$$$ subprogram documentation block +! . . . . +! subprogram: genqsat +! prgmmr: derber org: np23 date: 1998-01-14 +! +! abstract: obtain saturation specific humidity for given temperature. +! +! program history log: +! 1998-01-14 derber +! 1998-04-05 weiyu yang +! 1999-08-24 derber, j., treadon, r., yang, w., first frozen mpp version +! 1903-10-07 Wei Gu, bug fixes,if qs<0,then set qs=0; merge w/ GSI by R Todling +! 2003-12-23 kleist, use guess pressure, adapt module framework +! 2004-05-13 kleist, documentation +! 2004-06-03 treadon, replace ggrid_g3 array with ges_* arrays +! 2005-02-23 wu, output dlnesdtv +! 2005-11-21 kleist, derber add dmax array to decouple moisture from temp and +! pressure for questionable qsat +! 2006-02-02 treadon - rename prsl as ges_prsl +! 2006-09-18 derber - modify to limit saturated values near top +! 2006-11-22 derber - correct bug: es 2._r_kind) .and. & + tsen(i,j,k) < mint(i))then + lmint(i)=k + mint(i)=tsen(i,j,k) end if end do - else - kapr=one/rd_over_cp -!$omp parallel do schedule(dynamic,1) private(k,j,i,trk) - do k=1,nsig - if(k == 1)then - k2=nsig+1 - do j=1,grd_ens%lon2 - do i=1,grd_ens%lat2 - prs(i,j,k)=ps(i,j) - prs(i,j,k2)=zero - end do - end do + end do + do i=1,grd_ens%lat2 + tdry = mint(i) + tr = ttp/tdry + if (tdry >= ttp .or. .not. ice) then + estmax(i) = psat * (tr**xa) * exp(xb*(one-tr)) + elseif (tdry < tmix) then + estmax(i) = psat * (tr**xai) * exp(xbi*(one-tr)) + else + w = (tdry - tmix) / (ttp - tmix) + estmax(i) = w * psat * (tr**xa) * exp(xb*(one-tr)) & + + (one-w) * psat * (tr**xai) * exp(xbi*(one-tr)) + endif + end do + + do k = 1,nsig + do i = 1,grd_ens%lat2 + tdry = tsen(i,j,k) + tr = ttp/tdry + if (tdry >= ttp .or. .not. ice) then + es = psat * (tr**xa) * exp(xb*(one-tr)) + elseif (tdry < tmix) then + es = psat * (tr**xai) * exp(xbi*(one-tr)) else - do j=1,grd_ens%lon2 - do i=1,grd_ens%lat2 - trk=(half*(tv(i,j,k-1)+tv(i,j,k))/tref5(k))**kapr - prs(i,j,k)=ak5(k)+(bk5(k)*ps(i,j))+(ck5(k)*trk) - end do - end do + esw = psat * (tr**xa) * exp(xb*(one-tr)) + esi = psat * (tr**xai) * exp(xbi*(one-tr)) + w = (tdry - tmix) / (ttp - tmix) + es = w * esw + (one-w) * esi +! es = w * psat * (tr**xa) * exp(xb*(one-tr)) & +! 
+ (one-w) * psat * (tr**xai) * exp(xbi*(one-tr)) + + endif + + pw = onep3*prsl(i,j,k) + if(lmint(i) < k)then + esmax=0.1_r_kind*pw + esmax=min(esmax,estmax(i)) + es=min(es,esmax) end if - end do - end if - end if + qs = max(qmin, eps * es / (pw - omeps * es)) + q(i,j,k) = q(i,j,k)/qs + end do + end do + end do return -end subroutine general_getprs_glb +end subroutine genqsat2 + diff --git a/src/gsi/hybrid_ensemble_isotropic.F90 b/src/gsi/hybrid_ensemble_isotropic.F90 index fe2e058dff..fc87026c98 100644 --- a/src/gsi/hybrid_ensemble_isotropic.F90 +++ b/src/gsi/hybrid_ensemble_isotropic.F90 @@ -591,7 +591,7 @@ subroutine new_factorization_rf_x(f,iadvance,iback,nlevs,ig) ny=grd_loc%nlat ; nx=grd_loc%nlon ; nz=nlevs if(vvlocal)then -!$omp parallel do schedule(dynamic,1) private(k,j,i,l) +!$omp parallel do schedule(static,1) private(k,j,i,l) do k=1,nz if(iadvance == 1) then @@ -634,7 +634,7 @@ subroutine new_factorization_rf_x(f,iadvance,iback,nlevs,ig) enddo else -!$omp parallel do schedule(dynamic,1) private(k,j,i,l) +!$omp parallel do schedule(static,1) private(k,j,i,l) do k=1,nz if(iadvance == 1) then @@ -1607,7 +1607,7 @@ subroutine fix_belt(z) real(r_kind) zloc1(ny,nx) integer(i_kind) i,ii,j,jj,k -!$omp parallel do schedule(dynamic,1) private(j,k,i,jj,ii,zloc1) +!$omp parallel do schedule(static,1) private(j,k,i,jj,ii,zloc1) do j=1,nscl do k=1,nnnn1o i=0 @@ -1686,7 +1686,7 @@ subroutine rescale_ensemble_rh_perturbations end if do m=1,ntlevs_ens do ig=1,ntotensgrp -!$omp parallel do schedule(dynamic,1) private(n,i,j,k,w3,istatus) +!$omp parallel do schedule(static,1) private(n,i,j,k,w3,istatus) do n=1,n_ens call gsi_bundlegetpointer(en_perts(n,ig,m),'q',w3,istatus) if(istatus/=0) then @@ -1835,7 +1835,7 @@ subroutine ensemble_forward_model(cvec,a_en,ibin) ipx=1 -!$omp parallel do schedule(dynamic,1) private(j,n,ic3,k,i,ipic,ig,iaens) +!$omp parallel do schedule(static,1) private(j,n,ic3,k,i,ipic,ig,iaens) do k=1,km do ic3=1,nc3d ipic=ipc3d(ic3) @@ -1860,7 +1860,6 @@ subroutine ensemble_forward_model(cvec,a_en,ibin) enddo enddo -!$omp parallel do schedule(dynamic,1) private(j,n,k,i,ic2,ipic,ig,iaens) do ic2=1,nc2d ipic=ipc2d(ic2) do j=1,jm @@ -2012,7 +2011,7 @@ subroutine ensemble_forward_model_dual_res(cvec,a_en,ibin) im=work_ens%grid%im jm=work_ens%grid%jm km=work_ens%grid%km -!$omp parallel do schedule(dynamic,1) private(j,n,ic3,k,i,ipic,ig,iaens) +!$omp parallel do schedule(static,1) private(j,n,ic3,k,i,ipic,ig,iaens) do k=1,km do ic3=1,nc3d ipic=ipc3d(ic3) @@ -2036,7 +2035,6 @@ subroutine ensemble_forward_model_dual_res(cvec,a_en,ibin) enddo enddo enddo -!$omp parallel do schedule(dynamic,1) private(j,n,k,i,ic2,ipic,ig,iaens) do ic2=1,nc2d ipic=ipc2d(ic2) do j=1,jm @@ -2189,7 +2187,7 @@ subroutine ensemble_forward_model_ad(cvec,a_en,ibin) endif ipx=1 -!$omp parallel do schedule(dynamic,1) private(j,n,ic3,k,i,ic2,ipic,ig,iaens) +!$omp parallel do schedule(static,1) private(j,n,ic3,k,i,ic2,ipic,ig,iaens) do n=1,n_ens do ig=1,ntotensgrp do ic3=1,nc3d @@ -2206,6 +2204,7 @@ subroutine ensemble_forward_model_ad(cvec,a_en,ibin) enddo endif ! 
iaens>0 enddo + do ic2=1,nc2d iaens=ensgrp2aensgrp(ig,ic2+nc3d,ibin) if(iaens>0) then @@ -2354,7 +2353,7 @@ subroutine ensemble_forward_model_ad_dual_res(cvec,a_en,ibin) im=a_en(1,1)%grid%im jm=a_en(1,1)%grid%jm km=a_en(1,1)%grid%km -!$omp parallel do schedule(dynamic,1) private(j,n,ic3,k,i,ic2,ipic,ig,iaens) +!$omp parallel do schedule(static,1) private(j,n,ic3,k,i,ic2,ipic,ig,iaens) do n=1,n_ens do ig=1,ntotensgrp do ic3=1,nc3d @@ -2686,7 +2685,7 @@ subroutine sqrt_beta_s_mult_cvec(grady) endif ! multiply by sqrt_beta_s -!$omp parallel do schedule(dynamic,1) private(ic3,ic2,k,j,i,ii) +!$omp parallel do schedule(static,1) private(ic3,ic2,k,j,i,ii) do j=1,lon2 do ii=1,nsubwin do ic3=1,nc3d @@ -2784,7 +2783,7 @@ subroutine sqrt_beta_s_mult_bundle(grady) endif ! multiply by sqrt_beta_s -!$omp parallel do schedule(dynamic,1) private(ic3,ic2,k,j,i) +!$omp parallel do schedule(static,1) private(ic3,ic2,k,j,i) do j=1,lon2 do ic3=1,nc3d ! check for ozone and skip if oz_univ_static = true @@ -2864,12 +2863,12 @@ subroutine sqrt_beta_e_mult_cvec(grady) call timer_ini('sqrt_beta_e_mult') ! multiply by sqrt_beta_e -!$omp parallel do schedule(dynamic,1) private(nn,k,j,i,ii,ig) - do j=1,grd_ens%lon2 +!$omp parallel do schedule(static,1) private(nn,k,j,i,ii,ig) + do nn=1,n_ens do ii=1,nsubwin do ig=1,naensgrp - do nn=1,n_ens - do k=1,nsig + do k=1,nsig + do j=1,grd_ens%lon2 do i=1,grd_ens%lat2 grady%aens(ii,ig,nn)%r3(1)%q(i,j,k) = sqrt_beta_e(k)*grady%aens(ii,ig,nn)%r3(1)%q(i,j,k) enddo @@ -2931,11 +2930,11 @@ subroutine sqrt_beta_e_mult_bundle(aens) call timer_ini('sqrt_beta_e_mult') ! multiply by sqrt_beta_e -!$omp parallel do schedule(dynamic,1) private(nn,k,j,i,ig) - do j=1,grd_ens%lon2 +!$omp parallel do schedule(static,1) private(nn,k,j,i,ig) + do nn=1,n_ens do ig=1,naensgrp - do nn=1,n_ens - do k=1,nsig + do k=1,nsig + do j=1,grd_ens%lon2 do i=1,grd_ens%lat2 aens(ig,nn)%r3(1)%q(i,j,k) = sqrt_beta_e(k)*aens(ig,nn)%r3(1)%q(i,j,k) enddo @@ -2993,19 +2992,18 @@ subroutine init_sf_xy(jcap_in) integer(i_kind),intent(in ) :: jcap_in - integer(i_kind) i,ii,j,k,l,n,jcap,kk,nsigend,ig - real(r_kind),allocatable::g(:),gsave(:) + integer(i_kind) i,ii,j,igg,k,l,n,jcap,kk,nsigend,ig + real(r_kind),allocatable::g(:),gtemp(:) real(r_kind) factor real(r_kind),allocatable::rkm(:),f(:,:),f0(:,:) real(r_kind) ftest(grd_loc%nlat,grd_loc%nlon,grd_loc%kbegin_loc:grd_loc%kend_alloc) real(r_single) out1(grd_ens%nlon,grd_ens%nlat) - real(r_single),allocatable::pn0_npole(:) + real(r_single) pn0_npole real(r_kind) s_ens_h_min real(r_kind) rlats_ens_local(grd_ens%nlat) real(r_kind) rlons_ens_local(grd_ens%nlon) character(5) mapname logical make_test_maps - logical,allocatable,dimension(:)::ksame integer(i_kind) nord_sploc2ens integer(i_kind) nlon_sploc0,nlon_sploc,nlat_sploc,num_fields logical print_verbose @@ -3157,101 +3155,107 @@ subroutine init_sf_xy(jcap_in) if(.not.allocated(spectral_filter)) allocate(spectral_filter(naensloc,sp_loc%nc,grd_sploc%nsig)) if(.not.allocated(sqrt_spectral_filter)) allocate(sqrt_spectral_filter(naensloc,sp_loc%nc,grd_sploc%nsig)) - allocate(g(sp_loc%nc),gsave(sp_loc%nc)) - allocate(pn0_npole(0:sp_loc%jcap)) - allocate(ksame(grd_sploc%nsig)) + allocate(g(sp_loc%nc),gtemp(sp_loc%nc)) do ig=1,naensloc - ksame=.false. - do k=2,grd_sploc%nsig - if(s_ens_hv(k,ig) == s_ens_hv(k-1,ig))ksame(k)=.true. 
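As a side note on the loop reordering shown above in sqrt_beta_e_mult_cvec and sqrt_beta_e_mult_bundle: making the ensemble-member loop outermost gives each OpenMP thread an independent member to work on while the innermost latitude index stays contiguous in memory. A minimal sketch of the equivalent operation on a plain array follows; the subroutine name and the array a are illustrative assumptions only (the real code walks the GSI bundle pointers instead).

! Illustrative only: scale every ensemble field by a level-dependent weight,
! threading over the independent ensemble members as the reordered loops do.
subroutine scale_by_sqrt_beta_e(n_ens,nsig,lon2,lat2,sqrt_beta_e,a)
  use kinds, only: i_kind,r_kind
  implicit none
  integer(i_kind),intent(in   ) :: n_ens,nsig,lon2,lat2
  real(r_kind),   intent(in   ) :: sqrt_beta_e(nsig)
  real(r_kind),   intent(inout) :: a(lat2,lon2,nsig,n_ens)
  integer(i_kind) :: i,j,k,nn

!$omp parallel do schedule(static,1) private(nn,k,j,i)
  do nn=1,n_ens
     do k=1,nsig
        do j=1,lon2
           do i=1,lat2
              a(i,j,k,nn)=sqrt_beta_e(k)*a(i,j,k,nn)   ! innermost index is contiguous
           end do
        end do
     end do
  end do
end subroutine scale_by_sqrt_beta_e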
- enddo spectral_filter(ig,:,:)=zero - do k=1,grd_sploc%nsig - if(ksame(k))then - spectral_filter(ig,:,k)=spectral_filter(ig,:,k-1) - else + level_loop: do k=1,grd_sploc%nsig + do kk=1,k-1 + if(s_ens_hv(k,ig) == s_ens_hv(kk,ig))then + spectral_filter(ig,:,k)=spectral_filter(ig,:,k-1) + cycle level_loop + end if + end do + if(ig > 1)then + do igg=1,ig-1 + do kk=1,grd_sploc%nsig + if(s_ens_hv(k,ig) == s_ens_hv(kk,igg))then + spectral_filter(ig,:,k)=spectral_filter(igg,:,kk) + cycle level_loop + end if + end do + end do + end if + + do i=1,grd_sploc%nlat + f0(i,1)=exp(-half*(rkm(i)/s_ens_hv(k,ig))**2) + enddo + + + do j=2,grd_sploc%nlon do i=1,grd_sploc%nlat - f0(i,1)=exp(-half*(rkm(i)/s_ens_hv(k,ig))**2) + f0(i,j)=f0(i,1) enddo + end do - do j=2,grd_sploc%nlon - do i=1,grd_sploc%nlat - f0(i,j)=f0(i,1) - enddo - enddo - call general_g2s0(grd_sploc,sp_loc,g,f0) + call general_g2s0(grd_sploc,sp_loc,g,f0) - call general_s2g0(grd_sploc,sp_loc,g,f) + call general_s2g0(grd_sploc,sp_loc,g,f) -! adjust so value at np = 1 - f=f/f(grd_sploc%nlat,1) - f0=f - call general_g2s0(grd_sploc,sp_loc,g,f) - call general_s2g0(grd_sploc,sp_loc,g,f) - if(mype == 0)then - nsigend=k - do kk=k+1,grd_sploc%nsig - if(s_ens_hv(kk,ig) /= s_ens_hv(k,ig))exit - nsigend=nsigend+1 - enddo - write(6,900)k,nsigend,sp_loc%jcap,s_ens_hv(k,ig),maxval(abs(f0-f)) -900 format(' in init_sf_xy, jcap,s_ens_hv(',i5,1x,'-',i5,'), max diff(f0-f)=', & +! adjust so value at np = 1 + f=f/f(grd_sploc%nlat,1) + f0=f + call general_g2s0(grd_sploc,sp_loc,g,f) + call general_s2g0(grd_sploc,sp_loc,g,f) + if(mype == 0)then + nsigend=k + do kk=k+1,grd_sploc%nsig + if(s_ens_hv(kk,ig) /= s_ens_hv(k,ig))exit + nsigend=nsigend+1 + enddo + write(6,900)k,nsigend,sp_loc%jcap,s_ens_hv(k,ig),maxval(abs(f0-f)) +900 format(' in init_sf_xy, jcap,s_ens_hv(',i5,1x,'-',i5,'), max diff(f0-f)=', & i10,f10.2,e20.10) - end if + end if -! correct spectrum by dividing by pn0_npole - gsave=g +! correct spectrum by dividing by pn0_npole -! obtain pn0_npole - do n=0,sp_loc%jcap - g=zero - g(2*n+1)=one - call general_s2g0(grd_sploc,sp_loc,g,f) - pn0_npole(n)=f(grd_sploc%nlat,1) - enddo +! obtain pn0_npole +!$omp parallel do schedule(static,1) private(n,gtemp,f) + do n=0,sp_loc%jcap + gtemp=zero + gtemp(2*n+1)=one + call general_s2g0(grd_sploc,sp_loc,gtemp,f) + pn0_npole=f(grd_sploc%nlat,1) + g(2*n+1)=g(2*n+1)/pn0_npole + enddo - g=zero - do n=0,sp_loc%jcap - g(2*n+1)=gsave(2*n+1)/pn0_npole(n) - enddo -! obtain spectral_filter +! obtain spectral_filter - ii=0 - do l=0,sp_loc%jcap - if(ig>naensgrp) then - factor=one/g(1) + ii=0 + do l=0,sp_loc%jcap + if(ig>naensgrp) then + factor=one/g(1) + else + factor=one + if(l>0) factor=half + end if + do n=l,sp_loc%jcap + ii=ii+1 + if(sp_loc%factsml(ii)) then + spectral_filter(ig,ii,k)=zero else - factor=one - if(l>0) factor=half + spectral_filter(ig,ii,k)=factor*g(2*n+1) + end if + ii=ii+1 + if(l == 0 .or. sp_loc%factsml(ii)) then + spectral_filter(ig,ii,k)=zero + else + spectral_filter(ig,ii,k)=factor*g(2*n+1) end if - do n=l,sp_loc%jcap - ii=ii+1 - if(sp_loc%factsml(ii)) then - spectral_filter(ig,ii,k)=zero - else - spectral_filter(ig,ii,k)=factor*g(2*n+1) - end if - ii=ii+1 - if(l == 0 .or. sp_loc%factsml(ii)) then - spectral_filter(ig,ii,k)=zero - else - spectral_filter(ig,ii,k)=factor*g(2*n+1) - end if - enddo enddo - end if - enddo + enddo + enddo level_loop enddo !ig loop - deallocate(g,gsave,pn0_npole,ksame) + deallocate(g,gtemp) ! Compute sqrt(spectral_filter). 
Ensure spectral_filter >=0 zero -!$omp parallel do schedule(dynamic,1) private(k,i) +!$omp parallel do schedule(static,1) private(k,i) do ig=1,naensloc do k=1,grd_sploc%nsig do i=1,sp_loc%nc - if (spectral_filter(ig,i,k) < zero) spectral_filter(ig,i,k)=zero + spectral_filter(ig,i,k) = max(spectral_filter(ig,i,k),zero) sqrt_spectral_filter(ig,i,k) = sqrt(spectral_filter(ig,i,k)) end do end do @@ -3337,13 +3341,14 @@ subroutine sf_xy(ig,f,k_start,k_end) if(.not.use_localization_grid) then if(ig>naensgrp) then +!$omp parallel do schedule(static,1) private(k,g) do k=k_start,k_end call general_g2s0(grd_ens,sp_loc,g,f(:,:,k)) g(:)=g(:)*spectral_filter(ig,:,k_index(k)) call general_s2g0(grd_ens,sp_loc,g,f(:,:,k)) enddo else -!$omp parallel do schedule(dynamic,1) private(k) +!$omp parallel do schedule(static,1) private(k) do k=k_start,k_end call sfilter(grd_ens,sp_loc,spectral_filter(ig,:,k_index(k)),f(1,1,k)) enddo @@ -3353,6 +3358,7 @@ subroutine sf_xy(ig,f,k_start,k_end) vector=.false. if(ig>naensgrp) then +!$omp parallel do schedule(static,1) private(k,g,work) do k=k_start,k_end call g_agrid2egrid(p_sploc2ens,work,f(:,:,k:k),k,k,vector(k:k)) call general_g2s0(grd_ens,sp_loc,g,f(:,:,k)) @@ -3361,7 +3367,7 @@ subroutine sf_xy(ig,f,k_start,k_end) call g_egrid2agrid(p_sploc2ens,work,f(:,:,k:k),k,k,vector(k:k)) enddo else -!$omp parallel do schedule(dynamic,1) private(k,work) +!$omp parallel do schedule(static,1) private(k,work) do k=k_start,k_end call g_egrid2agrid_ad(p_sploc2ens,work,f(:,:,k:k),k,k,vector(k:k)) call sfilter(grd_ens,sp_loc,spectral_filter(ig,:,k_index(k)),f(1,1,k)) @@ -3421,6 +3427,7 @@ subroutine sqrt_sf_xy(ig,z,f,k_start,k_end) if(.not.use_localization_grid) then +!$omp parallel do schedule(static,1) private(k,g) do k=k_start,k_end g(:)=z(:,k)*sqrt_spectral_filter(ig,:,k_index(k)) call general_s2g0(grd_ens,sp_loc,g,f(:,:,k)) @@ -3429,6 +3436,7 @@ subroutine sqrt_sf_xy(ig,z,f,k_start,k_end) else vector=.false. +!$omp parallel do schedule(static,1) private(k,g,work) do k=k_start,k_end g(:)=z(:,k)*sqrt_spectral_filter(ig,:,k_index(k)) call general_s2g0(grd_sploc,sp_loc,g,work) @@ -3488,6 +3496,7 @@ subroutine sqrt_sf_xy_ad(ig,z,f,k_start,k_end) if(.not.use_localization_grid) then +!$omp parallel do schedule(static,1) private(k,g) do k=k_start,k_end call general_s2g0_ad(grd_ens,sp_loc,g,f(:,:,k)) z(:,k)=g(:)*sqrt_spectral_filter(ig,:,k_index(k)) @@ -3496,6 +3505,7 @@ subroutine sqrt_sf_xy_ad(ig,z,f,k_start,k_end) else vector=.false. +!$omp parallel do schedule(static,1) private(k,g,work) do k=k_start,k_end call g_egrid2agrid_ad(p_sploc2ens,work,f(:,:,k:k),k,k,vector(k:k)) call general_s2g0_ad(grd_sploc,sp_loc,g,work) @@ -3608,7 +3618,7 @@ subroutine bkerror_a_en(grady) ! Declare local variables integer(i_kind) ii,ip,istatus,k,ig,ig2 real(r_kind),allocatable,dimension(:,:) :: z - real(r_kind),allocatable,dimension(:) :: ztmp + real(r_kind),allocatable,dimension(:) :: z2 ! Initialize timer call timer_ini('bkerror_a_en') @@ -3624,34 +3634,29 @@ subroutine bkerror_a_en(grady) call sqrt_beta_e_mult(grady) ! Apply variances, as well as vertical & horizontal parts of background error -! 
!$omp parallel do schedule(dynamic,1) private(ii) - do ii=1,nsubwin - if (naensgrp==1) then + if (naensgrp==1) then + do ii=1,nsubwin call bkgcov_a_en_new_factorization(1,grady%aens(ii,1,1:n_ens)) - else - allocate(z(naensgrp,nval_lenz_en)) + end do + else + allocate(z(nval_lenz_en,naensgrp)) + allocate(z2(nval_lenz_en)) + do ii=1,nsubwin do ig=1,naensgrp - call ckgcov_a_en_new_factorization_ad(ig,z(ig,:),grady%aens(ii,ig,1:n_ens)) + call ckgcov_a_en_new_factorization_ad(ig,z(1,ig),grady%aens(ii,ig,1:n_ens)) enddo - allocate(ztmp(naensgrp)) - do k=1,nval_lenz_en - ztmp=zero - do ig=1,naensgrp - do ig2=1,naensgrp - ztmp(ig) = ztmp(ig) + z(ig2,k) * alphacvarsclgrpmat(ig,ig2) + do ig=1,naensgrp + z2=zero + do ig2=1,naensgrp + do k=1,nval_lenz_en + z2(k) = z2(k) + z(k,ig2) * alphacvarsclgrpmat(ig,ig2) enddo enddo - do ig=1,naensgrp - z(ig,k) = ztmp(ig) - enddo - enddo - deallocate(ztmp) - do ig=1,naensgrp - call ckgcov_a_en_new_factorization(ig,z(ig,:),grady%aens(ii,ig,1:n_ens)) + call ckgcov_a_en_new_factorization(ig,z2,grady%aens(ii,ig,1:n_ens)) enddo - deallocate(z) - endif - enddo + enddo + deallocate(z,z2) + endif ! multiply by sqrt_beta_e_mult call sqrt_beta_e_mult(grady) @@ -3710,11 +3715,7 @@ subroutine bkgcov_a_en_new_factorization(ig,a_en) real(r_kind) hwork(grd_loc%inner_vars,grd_loc%nlat,grd_loc%nlon,grd_loc%kbegin_loc:grd_loc%kend_alloc) real(r_kind),allocatable,dimension(:):: a_en_work - call gsi_bundlegetpointer(a_en(1),'a_en',ipnt,istatus) - if(istatus/=0) then - write(6,*)'bkgcov_a_en_new_factorization: trouble getting pointer to ensemble CV' - call stop2(999) - endif + ipnt=1 ! Apply vertical smoother on each ensemble member ! To avoid my having to touch the general sub2grid and grid2sub, @@ -3725,7 +3726,7 @@ subroutine bkgcov_a_en_new_factorization(ig,a_en) call stop2(999) endif iadvance=1 ; iback=2 -!$omp parallel do schedule(dynamic,1) private(k,ii,is,ie) +!$omp parallel do schedule(static,1) private(k,ii,is,ie) do k=1,n_ens call new_factorization_rf_z(a_en(k)%r3(ipnt)%q,iadvance,iback,ig) ii=(k-1)*a_en(1)%ndim @@ -3755,7 +3756,7 @@ subroutine bkgcov_a_en_new_factorization(ig,a_en) ! Retrieve ensemble components from long vector ! Apply vertical smoother on each ensemble member iadvance=2 ; iback=1 -!$omp parallel do schedule(dynamic,1) private(k,ii,is,ie) +!$omp parallel do schedule(static,1) private(k,ii,is,ie) do k=1,n_ens ii=(k-1)*a_en(1)%ndim is=ii+1 @@ -3865,9 +3866,10 @@ subroutine ckgcov_a_en_new_factorization(ig,z,a_en) deallocate(a_en_work) ! Apply vertical smoother on each ensemble member + iadvance=2 ; iback=1 +!$omp parallel do schedule(static,1) private(k) do k=1,n_ens - iadvance=2 ; iback=1 call new_factorization_rf_z(a_en(k)%r3(ipnt)%q,iadvance,iback,ig) enddo @@ -3937,9 +3939,10 @@ subroutine ckgcov_a_en_new_factorization_ad(ig,z,a_en) endif ! Apply vertical smoother on each ensemble member + iadvance=1 ; iback=2 +!$omp parallel do schedule(static,1) private(k) do k=1,n_ens - iadvance=1 ; iback=2 call new_factorization_rf_z(a_en(k)%r3(ipnt)%q,iadvance,iback,ig) enddo @@ -4125,7 +4128,7 @@ subroutine hybens_grid_setup end if if(global_spectral_filter_sd .and. nsclgrp > 1)then - allocate(spc_multwgt(0:jcap_ens,nsclgrp)) + allocate(spc_multwgt(sp_ens%nc,nsclgrp)) allocate(spcwgt_params(4,nsclgrp)) spc_multwgt=1.0 diff --git a/src/gsi/read_iasi.f90 b/src/gsi/read_iasi.f90 index 038188f92a..4e688cd36e 100644 --- a/src/gsi/read_iasi.f90 +++ b/src/gsi/read_iasi.f90 @@ -829,7 +829,11 @@ subroutine read_iasi(mype,val_iasi,ithin,isfcalc,rmesh,jsatid,gstime,& ! 
Prevent out of bounds reference from temperature if ( bufr_index(l) == 0 ) cycle i = bufr_index(l) - data_all(l+nreal,itx) = temperature(i) ! brightness temerature + if(i /= 0)then + data_all(l+nreal,itx) = temperature(i) ! brightness temerature + else + data_all(l+nreal,itx) = tbmin + end if end do nrec(itx)=irec diff --git a/src/gsi/read_prepbufr.f90 b/src/gsi/read_prepbufr.f90 index b72e584155..9efd06418c 100644 --- a/src/gsi/read_prepbufr.f90 +++ b/src/gsi/read_prepbufr.f90 @@ -2050,9 +2050,9 @@ subroutine read_prepbufr(nread,ndata,nodata,infile,obstype,lunout,twindin,sis,& if(oelev>7000.0_r_kind) cycle loop_k_levs if(abs(diffvv)>5.0_r_kind.and.oelev<5000.0_r_kind) cycle loop_k_levs ! write(6,*)'sliu diffuu,vv::',diffuu, diffvv - uob=0.0 - vob=0.0 - oelev=0.0 + uob=zero + vob=zero + oelev=zero tkk=0 do ikkk=k,klev diffhgt=obsdat(4,ikkk)-obsdat(4,k) diff --git a/src/gsi/setupcldtot.F90 b/src/gsi/setupcldtot.F90 index a30ef92a90..694c8f1df3 100755 --- a/src/gsi/setupcldtot.F90 +++ b/src/gsi/setupcldtot.F90 @@ -45,13 +45,22 @@ subroutine setupcldtot(obsLL,odiagLL,lunin,mype,bwork,awork,nele,nobs,is,conv_di use mpeu_util, only: die,perr use kinds, only: r_kind,r_single,r_double,i_kind + use constants, only: zero,one,r1000,r10,r100 + use constants, only: huge_single,wgtlim,three + use constants, only: tiny_r_kind,five,half,two,r0_01 + use constants, only: zero,one, h1000 + use obsmod, only: rmiss_single,time_offset + use obsmod, only: netcdf_diag, binary_diag, dirname, ianldate + use obsmod, only: luse_obsdiag + use m_obsLList, only: obsLList + use m_obsdiagNode, only: obs_diags use m_obsNode, only: obsNode use m_qNode, only: qNode use m_qNode, only: qNode_appendto + use m_dtime, only: dtime_setup, dtime_check, dtime_show use gsi_4dvar, only: nobs_bins,hr_obsbin - use obsmod, only: netcdf_diag, binary_diag, dirname, ianldate use nc_diag_write_mod,only: nc_diag_init, nc_diag_header,nc_diag_metadata, & nc_diag_write, nc_diag_data2d use nc_diag_read_mod, only: nc_diag_read_init,nc_diag_read_get_dim, & @@ -60,26 +69,18 @@ subroutine setupcldtot(obsLL,odiagLL,lunin,mype,bwork,awork,nele,nobs,is,conv_di use guess_grids, only: geop_hgtl,hrdifsig,nfldsig,ges_tsen,ges_prsl use gridmod, only: nsig,get_ijk - use constants, only: zero,one,r1000,r10,r100 - use constants, only: huge_single,wgtlim,three - use constants, only: tiny_r_kind,five,half,two,r0_01 use qcmod, only: npres_print use jfunc, only: jiter use convinfo, only: nconvtype use convinfo, only: icsubtype - use m_dtime, only: dtime_setup, dtime_check, dtime_show use rapidrefresh_cldsurf_mod, only: i_cloud_q_innovation, & cld_bld_hgt,i_ens_mean use gsi_bundlemod, only : gsi_bundlegetpointer use gsi_metguess_mod, only : gsi_metguess_get,gsi_metguess_bundle use mpimod, only: mpi_comm_world - use constants, only: zero,one, h1000 use gsdcloudlib_pseudoq_mod, only: cloudLWC_pseudo,cloudCover_Surface_col - use m_obsLList, only: obsLList - use m_obsdiagNode, only: obs_diags - use obsmod, only: luse_obsdiag implicit none diff --git a/src/gsi/setuprad.f90 b/src/gsi/setuprad.f90 index ebdd8de52a..f686e19332 100644 --- a/src/gsi/setuprad.f90 +++ b/src/gsi/setuprad.f90 @@ -776,6 +776,12 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,& call stop2(275) end if + if (abi2km .and. regional) then + abi2km_bc = zero + abi2km_bc(2) = 233.5_r_kind + abi2km_bc(3) = 241.7_r_kind + abi2km_bc(4) = 250.5_r_kind + end if ! PROCESSING OF SATELLITE DATA ! 
Loop over data in this block @@ -1085,10 +1091,6 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,& endif predbias=zero - abi2km_bc = zero - abi2km_bc(2) = 233.5_r_kind - abi2km_bc(3) = 241.7_r_kind - abi2km_bc(4) = 250.5_r_kind !$omp parallel do schedule(dynamic,1) private(i,mm,j,k,tlap,node,bias) do i=1,nchanl mm=ich(i) diff --git a/src/gsi/stpcalc.f90 b/src/gsi/stpcalc.f90 index dd60703ce2..849d2ff5c9 100644 --- a/src/gsi/stpcalc.f90 +++ b/src/gsi/stpcalc.f90 @@ -263,7 +263,7 @@ subroutine stpcalc(stpinout,sval,sbias,dirx,dval,dbias, & real(r_quad),parameter:: one_tenth_quad = 0.1_r_quad ! Declare local variables - integer(i_kind) i,j,mm1,ii,iis,final_ii,ibin,ipenloc,it + integer(i_kind) i,j,mm1,ii,final_ii,ibin,ipenloc,it integer(i_kind) istp_use,nstep,nsteptot,kprt real(r_quad),dimension(4,ipen):: pbc real(r_quad),dimension(4,nobs_type):: pbcjo @@ -430,7 +430,6 @@ subroutine stpcalc(stpinout,sval,sbias,dirx,dval,dbias, & pbc=zero_quad pjcalc=.false. if(iter == 0 .and. kprt >= 2 .and. ii == 1)pjcalc=.true. - iis=ii ! Delta stepsize sges(1)= stp(ii-1)
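
Below is a minimal, self-contained Fortran sketch (illustrative only, not part of the patch) of the reordered scale-group weighting introduced in bkerror_a_en above. The sizes nval and ngrp and the weight array wgt are placeholders standing in for nval_lenz_en, naensgrp and alphacvarsclgrpmat; the point is simply why the patch swaps the dimensions of z from (naensgrp,nval_lenz_en) to (nval_lenz_en,naensgrp), so the innermost loop sweeps the contiguous first index.

! Illustrative sketch only; names nval, ngrp and wgt are placeholders, not GSI symbols.
program scale_group_weighting_sketch
  implicit none
  integer, parameter :: r_kind = selected_real_kind(15)
  integer, parameter :: nval = 8            ! stands in for nval_lenz_en
  integer, parameter :: ngrp = 3            ! stands in for naensgrp
  real(r_kind) :: z(nval,ngrp)              ! per-scale-group control vectors
  real(r_kind) :: z2(nval)                  ! weighted combination for one output group
  real(r_kind) :: wgt(ngrp,ngrp)            ! stands in for alphacvarsclgrpmat
  integer :: ig, ig2, k

  call random_number(z)
  call random_number(wgt)

  do ig = 1, ngrp
     z2 = 0.0_r_kind
     do ig2 = 1, ngrp
        ! k is the fastest-varying (first) index of z, so this inner loop is a
        ! contiguous, vectorizable sweep -- the access pattern the new layout enables.
        do k = 1, nval
           z2(k) = z2(k) + z(k,ig2)*wgt(ig,ig2)
        end do
     end do
     write(*,'(a,i0,a,f12.6)') 'group ', ig, ' sum(z2) = ', sum(z2)
  end do
end program scale_group_weighting_sketch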