Remove MAP_CONSISTENT input keyword by hard coding it to TRUE

oschuett committed Feb 14, 2020
1 parent ae38e45 commit 4470b2eba22a6bcad7ee1e7f9e63c02217d4b4b1
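
In short, the change follows one pattern throughout: the logical is dropped from qs_control, from input parsing and the printed QS banner, and from the &QS input definition, and every collocation/integration call site passes the literal .TRUE. instead of the stored flag. A minimal, self-contained sketch of that call-site pattern (not CP2K code; collocate_sketch is an illustrative stand-in for routines such as collocate_pgf_product):

PROGRAM hardcode_flag_pattern
   IMPLICIT NONE
   LOGICAL :: map_consistent

   ! Before: the flag was read from the &QS input section
   ! (stand-in for section_vals_val_get on "MAP_CONSISTENT") and threaded through.
   map_consistent = .TRUE.
   CALL collocate_sketch(map_consistent=map_consistent)

   ! After: the keyword is gone and call sites pass the literal .TRUE.
   CALL collocate_sketch(map_consistent=.TRUE.)

CONTAINS

   SUBROUTINE collocate_sketch(map_consistent)
      LOGICAL, INTENT(IN) :: map_consistent
      PRINT *, "map_consistent =", map_consistent
   END SUBROUTINE collocate_sketch

END PROGRAM hardcode_flag_pattern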
@@ -281,7 +281,6 @@ MODULE cp_control_types
LOGICAL :: do_kg
LOGICAL :: commensurate_mgrids
LOGICAL :: realspace_mgrids
- LOGICAL :: map_consistent
LOGICAL :: gapw, gapw_xc, gpw, pao
LOGICAL :: lrigpw, rigpw
LOGICAL :: lri_optbas
@@ -726,7 +726,6 @@ SUBROUTINE read_qs_section(qs_control, qs_section)
CALL section_vals_val_get(qs_section, "KG_METHOD", l_val=qs_control%do_kg)
! Logicals
CALL section_vals_val_get(qs_section, "MAP_CONSISTENT", l_val=qs_control%map_consistent)
CALL section_vals_val_get(qs_section, "REF_EMBED_SUBSYS", l_val=qs_control%ref_embed_subsys)
CALL section_vals_val_get(qs_section, "CLUSTER_EMBED_SUBSYS", l_val=qs_control%cluster_embed_subsys)
CALL section_vals_val_get(qs_section, "HIGH_LEVEL_EMBED_SUBSYS", l_val=qs_control%high_level_embed_subsys)
@@ -1665,10 +1664,6 @@ SUBROUTINE write_qs_control(qs_control, dft_section)
"QS| Grid level progression factor:", qs_control%progression_factor
WRITE (UNIT=output_unit, FMT="(T2,A,T71,F10.1)") &
"QS| Relative density cutoff [a.u.]:", qs_control%relative_cutoff
- IF (qs_control%map_consistent) THEN
- WRITE (UNIT=output_unit, FMT="(T2,A)") &
- "QS| Consistent realspace mapping and integration "
- ENDIF
WRITE (UNIT=output_unit, FMT="(T2,A,T73,ES8.1)") &
"QS| Interaction thresholds: eps_pgf_orb:", &
qs_control%eps_pgf_orb, &
@@ -3161,16 +3161,6 @@ SUBROUTINE create_qs_section(section)
CALL section_add_keyword(section, keyword)
CALL keyword_release(keyword)

- CALL keyword_create(keyword, __LOCATION__, name="MAP_CONSISTENT", &
- description="Compute the exact derivative (Hks) of the energy with respect to the density matrix. "// &
- "This is slightly more expensive than using an approximate computation, "// &
- "but consistent mapping can improve the stability of the SCF procedure, "// &
- "especially for a tight EPS_SCF and a less tight EPS_DEFAULT.", &
- usage="MAP_CONSISTENT FALSE", &
- default_l_val=.TRUE., lone_keyword_l_val=.TRUE.)
- CALL section_add_keyword(section, keyword)
- CALL keyword_release(keyword)

CALL keyword_create(keyword, __LOCATION__, name="REF_EMBED_SUBSYS", &
description="A total, reference, system in DFT embedding. ", &
usage="REF_EMBED_SUBSYS FALSE", &
@@ -550,7 +550,7 @@ SUBROUTINE calculate_lri_rho_elec(lri_rho_g, lri_rho_r, qs_env, &
INTEGER, DIMENSION(:), POINTER :: atom_list, la_max, la_min, mylmax, &
npgfa, nsgfa
INTEGER, DIMENSION(:, :), POINTER :: first_sgfa
- LOGICAL :: found, map_consistent
+ LOGICAL :: found
LOGICAL, ALLOCATABLE, DIMENSION(:) :: map_it
LOGICAL, ALLOCATABLE, DIMENSION(:, :) :: map_it2
REAL(KIND=dp) :: eps_rho_rspace, rab2, zetp
@@ -592,7 +592,6 @@ SUBROUTINE calculate_lri_rho_elec(lri_rho_g, lri_rho_r, qs_env, &

cube_info => pw_env%cube_info
eps_rho_rspace = dft_control%qs_control%eps_rho_rspace
- map_consistent = dft_control%qs_control%map_consistent
gridlevel_info => pw_env%gridlevel_info

! *** set up the pw multi-grids *** !
@@ -711,7 +710,7 @@ SUBROUTINE calculate_lri_rho_elec(lri_rho_g, lri_rho_r, qs_env, &
cube_info=cube_info(igrid_level), &
eps_rho_rspace=eps_rho_rspace, &
ga_gb_function=GRID_FUNC_AB, &
- map_consistent=map_consistent, &
+ map_consistent=.TRUE., &
lmax_global=lmax_global)
ENDIF
END DO
@@ -830,7 +829,7 @@ SUBROUTINE calculate_lri_rho_elec(lri_rho_g, lri_rho_r, qs_env, &
ra, rab, 1.0_dp, pab, na1, nb1, &
rs_grid, cell, cube_info(igrid_level), &
eps_rho_rspace, ga_gb_function=GRID_FUNC_AB, &
- map_consistent=map_consistent, lmax_global=lmax_global)
+ map_consistent=.TRUE., lmax_global=lmax_global)
END IF
END DO
END DO
@@ -1434,7 +1433,7 @@ SUBROUTINE calculate_rho_elec(matrix_p, matrix_p_kp, rho, rho_gspace, total_rho,
INTEGER, DIMENSION(:), POINTER :: la_max, la_min, lb_max, lb_min, mylmax, &
npgfa, npgfb, nsgfa, nsgfb
INTEGER, DIMENSION(:, :), POINTER :: first_sgfa, first_sgfb
- LOGICAL :: atom_pair_changed, distributed_rs_grids, do_kp, found, map_consistent, &
+ LOGICAL :: atom_pair_changed, distributed_rs_grids, do_kp, found, &
my_compute_grad, my_compute_tau, my_soft, use_subpatch
REAL(KIND=dp) :: eps_rho_rspace, rab2, scale, zetp
REAL(KIND=dp), DIMENSION(3) :: ra, rab, rab_inv, rb
@@ -1643,7 +1642,6 @@ SUBROUTINE calculate_rho_elec(matrix_p, matrix_p_kp, rho, rho_gspace, total_rho,
END IF

eps_rho_rspace = dft_control%qs_control%eps_rho_rspace
- map_consistent = dft_control%qs_control%map_consistent

! *** Initialize working density matrix ***
! distributed rs grids require a matrix that will be changed
@@ -1685,7 +1683,7 @@ SUBROUTINE calculate_rho_elec(matrix_p, matrix_p_kp, rho, rho_gspace, total_rho,
!$OMP PARALLEL DEFAULT(NONE), &
!$OMP SHARED(ntasks,tasks,nimages,natoms,maxset,maxpgf,particle_set,pabt,workt), &
!$OMP SHARED(my_basis_type,my_soft,deltap,maxco,ncoset,nthread), &
- !$OMP SHARED(cell,cube_info,eps_rho_rspace,ga_gb_function, my_idir,map_consistent), &
+ !$OMP SHARED(cell,cube_info,eps_rho_rspace,ga_gb_function, my_idir), &
!$OMP SHARED(rs_rho,lgrid,gridlevel_info,task_list,qs_kind_set,lmax_global), &
!$OMP PRIVATE(igrid_level,iatom,jatom,iset,jset,ipgf,jpgf,ikind,jkind,pab,work), &
!$OMP PRIVATE(img,img_old,iatom_old,jatom_old,iset_old,jset_old,ikind_old,jkind_old), &
@@ -1850,7 +1848,7 @@ SUBROUTINE calculate_rho_elec(matrix_p, matrix_p_kp, rho, rho_gspace, total_rho,
eps_rho_rspace, &
ga_gb_function=ga_gb_function, &
idir=my_idir, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, &
+ map_consistent=.TRUE., use_subpatch=use_subpatch, &
subpatch_pattern=tasks(itask)%subpatch_pattern, lmax_global=lmax_global)
ELSE
rab_inv = -rab
@@ -1862,7 +1860,7 @@ SUBROUTINE calculate_rho_elec(matrix_p, matrix_p_kp, rho, rho_gspace, total_rho,
eps_rho_rspace, &
ga_gb_function=ga_gb_function, &
idir=my_idir, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, &
+ map_consistent=.TRUE., use_subpatch=use_subpatch, &
subpatch_pattern=tasks(itask)%subpatch_pattern, lmax_global=lmax_global)
END IF
END DO loop_tasks
@@ -1973,7 +1971,7 @@ SUBROUTINE calculate_drho_elec(matrix_p, matrix_p_kp, drho, drho_gspace, qs_env,
npgfa, npgfb, nsgfa, nsgfb
INTEGER, DIMENSION(:, :), POINTER :: first_sgfa, first_sgfb
LOGICAL :: atom_pair_changed, distributed_rs_grids, &
- do_kp, found, map_consistent, my_soft, &
+ do_kp, found, my_soft, &
use_subpatch
REAL(KIND=dp) :: eps_rho_rspace, scale, zetp
REAL(KIND=dp), DIMENSION(3) :: ra, rab, rab_inv, rb
@@ -2100,7 +2098,6 @@ SUBROUTINE calculate_drho_elec(matrix_p, matrix_p_kp, drho, drho_gspace, qs_env,
END DO

eps_rho_rspace = dft_control%qs_control%eps_rho_rspace
- map_consistent = dft_control%qs_control%map_consistent

! *** Initialize working density matrix ***
! distributed rs grids require a matrix that will be changed
@@ -2289,7 +2286,7 @@ SUBROUTINE calculate_drho_elec(matrix_p, matrix_p_kp, drho, drho_gspace, qs_env,
eps_rho_rspace, &
ga_gb_function=GRID_FUNC_DABpADB, &
idir=idir, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
+ map_consistent=.TRUE., use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
lmax_global=lmax_global)
ELSE
rab_inv = -rab
@@ -2301,7 +2298,7 @@ SUBROUTINE calculate_drho_elec(matrix_p, matrix_p_kp, drho, drho_gspace, qs_env,
eps_rho_rspace, &
ga_gb_function=GRID_FUNC_DABpADB, &
idir=idir, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
+ map_consistent=.TRUE., use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern, &
lmax_global=lmax_global)
END IF
@@ -207,9 +207,9 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
npgfb, nsgfa, nsgfb
INTEGER, DIMENSION(:, :), POINTER :: first_sgfa, first_sgfb
LOGICAL :: atom_pair_changed, atom_pair_done, distributed_grids, do_kp, found, h_duplicated, &
- has_threads, map_consistent, my_compute_tau, my_force_adm, my_gapw, new_set_pair_coming, &
+ has_threads, my_compute_tau, my_force_adm, my_gapw, new_set_pair_coming, &
p_duplicated, pab_required, scatter, use_subpatch, use_virial
- REAL(KIND=dp) :: admm_scal_fac, dab, eps_gvg_rspace, &
+ REAL(KIND=dp) :: admm_scal_fac, dab, eps_rho_rspace, &
scalef, zetp
REAL(KIND=dp), DIMENSION(3) :: force_a, force_b, ra, rab, rab_inv, rb
REAL(KIND=dp), DIMENSION(3, 3) :: my_virial_a, my_virial_b, pv_thread
@@ -347,16 +347,10 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
pv_thread = 0.0_dp
force_thread = 0.0_dp

- map_consistent = dft_control%qs_control%map_consistent
- IF (map_consistent) THEN
- ! needs to be consistent with rho_rspace
- eps_gvg_rspace = dft_control%qs_control%eps_rho_rspace
- ELSE
- eps_gvg_rspace = dft_control%qs_control%eps_gvg_rspace
- ENDIF
+ ! needs to be consistent with rho_rspace
+ eps_rho_rspace = dft_control%qs_control%eps_rho_rspace

- pab_required = (PRESENT(pmat) .OR. PRESENT(pmat_kp)) &
- .AND. (calculate_forces .OR. .NOT. map_consistent)
+ pab_required = (PRESENT(pmat) .OR. PRESENT(pmat_kp)) .AND. calculate_forces

CALL get_qs_kind_set(qs_kind_set=qs_kind_set, &
maxco=maxco, &
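
This is the one place where hard-coding the flag simplifies control flow rather than just substituting an argument: with map_consistent fixed to .TRUE., the integration threshold always follows eps_rho_rspace and pab is only required when forces are computed. A minimal, self-contained sketch of that Boolean reduction (not CP2K code; the threshold values are placeholders):

PROGRAM map_consistent_reduction
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
   LOGICAL, PARAMETER :: map_consistent = .TRUE.   ! hard-coded by this commit
   LOGICAL :: calculate_forces, pmat_present, pab_required
   REAL(KIND=dp) :: eps_rho_rspace, eps_gvg_rspace, eps_used

   eps_rho_rspace = 1.0E-10_dp   ! placeholder values
   eps_gvg_rspace = 1.0E-6_dp
   pmat_present = .TRUE.
   calculate_forces = .FALSE.

   ! Old logic, as removed above:
   IF (map_consistent) THEN
      eps_used = eps_rho_rspace        ! needs to be consistent with rho_rspace
   ELSE
      eps_used = eps_gvg_rspace
   END IF
   pab_required = pmat_present .AND. (calculate_forces .OR. .NOT. map_consistent)

   ! With map_consistent fixed to .TRUE. both expressions reduce to the new code:
   !   eps_used     = eps_rho_rspace
   !   pab_required = pmat_present .AND. calculate_forces
   PRINT *, eps_used, pab_required
END PROGRAM map_consistent_reduction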
@@ -462,7 +456,7 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
!$OMP SHARED(workt,habt,hdabt,hadbt,pabt,tasks,particle_set,natom,maxset), &
!$OMP SHARED(maxpgf,my_basis_type,my_gapw,dhmat,deltap,use_virial,admm_scal_fac), &
!$OMP SHARED(pab_required,calculate_forces,ncoset,rs_v,cube_info,my_compute_tau), &
- !$OMP SHARED(map_consistent,eps_gvg_rspace,force,virial,cell), &
+ !$OMP SHARED(eps_rho_rspace,force,virial,cell), &
!$OMP SHARED(gridlevel_info,task_list,block_touched,nthread,qs_kind_set), &
!$OMP SHARED(nimages,do_kp), &
!$OMP PRIVATE(ithread,work,hab,hdab,hadb,pab,iset_old,jset_old), &
@@ -677,10 +671,10 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
ra, rab, rs_v(igrid_level)%rs_grid, cell, &
cube_info(igrid_level), &
hab, pab=pab, o1=na1 - 1, o2=nb1 - 1, &
- eps_gvg_rspace=eps_gvg_rspace, &
+ eps_gvg_rspace=eps_rho_rspace, &
calculate_forces=calculate_forces, &
force_a=force_a, force_b=force_b, &
- compute_tau=my_compute_tau, map_consistent=map_consistent, &
+ compute_tau=my_compute_tau, map_consistent=.TRUE., &
use_virial=use_virial, my_virial_a=my_virial_a, &
my_virial_b=my_virial_b, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
ELSE
@@ -691,10 +685,10 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
rb, rab_inv, rs_v(igrid_level)%rs_grid, cell, &
cube_info(igrid_level), &
hab, pab=pab, o1=nb1 - 1, o2=na1 - 1, &
- eps_gvg_rspace=eps_gvg_rspace, &
+ eps_gvg_rspace=eps_rho_rspace, &
calculate_forces=calculate_forces, &
force_a=force_b, force_b=force_a, &
- compute_tau=my_compute_tau, map_consistent=map_consistent, &
+ compute_tau=my_compute_tau, map_consistent=.TRUE., &
use_virial=use_virial, my_virial_a=my_virial_b, &
my_virial_b=my_virial_a, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
END IF
@@ -706,11 +700,11 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
ra, rab, rs_v(igrid_level)%rs_grid, cell, &
cube_info(igrid_level), &
hab, o1=na1 - 1, o2=nb1 - 1, &
- eps_gvg_rspace=eps_gvg_rspace, &
+ eps_gvg_rspace=eps_rho_rspace, &
calculate_forces=calculate_forces, &
force_a=force_a, force_b=force_b, &
compute_tau=my_compute_tau, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
+ map_consistent=.TRUE., use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
ELSE
rab_inv = -rab
CALL integrate_pgf_product( &
@@ -719,11 +713,11 @@ SUBROUTINE integrate_v_rspace_low(v_rspace, hmat, hmat_kp, pmat, pmat_kp, qs_env
rb, rab_inv, rs_v(igrid_level)%rs_grid, cell, &
cube_info(igrid_level), &
hab, o1=nb1 - 1, o2=na1 - 1, &
- eps_gvg_rspace=eps_gvg_rspace, &
+ eps_gvg_rspace=eps_rho_rspace, &
calculate_forces=calculate_forces, &
force_a=force_b, force_b=force_a, &
compute_tau=my_compute_tau, &
- map_consistent=map_consistent, use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
+ map_consistent=.TRUE., use_subpatch=use_subpatch, subpatch_pattern=tasks(itask)%subpatch_pattern)
END IF
END IF
