Skip to content

Commit

Permalink
make pairwise_dprc model work with MPI (#2818)
Browse files Browse the repository at this point in the history
- make `aparam` accept `nall` instead of `nloc`. A variable
`fitting_attr/aparam_nall` (dtype=bool) controls this behavior.
  - enable this behavior for se_a_mask, by the way
- fix the shape of atomic energy, which is `nloc` instead of `nall`
- set the minimal `nloc` to 1, since when nloc=0 many OPs (such as
prod_force) throw a floating-point exception
- fix backward map when the shape of `nloc` is padded

---------

Signed-off-by: Jinzhe Zeng <jinzhe.zeng@rutgers.edu>
  • Loading branch information
njzjz committed Sep 15, 2023
1 parent ab357f8 commit 0d5737f
Show file tree
Hide file tree
Showing 16 changed files with 45,014 additions and 90 deletions.
4 changes: 3 additions & 1 deletion deepmd/descriptor/se_a_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,10 +301,12 @@ def build(
dstd = self.dstd

"""
``aparam'' shape is [nframes, natoms]
``aparam'' shape is [nframes, nall]
aparam[:, :] is the real/virtual sign for each atom.
"""
aparam = input_dict["aparam"]
with tf.variable_scope("fitting_attr" + suffix, reuse=reuse):
t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool)
self.mask = tf.cast(aparam, tf.int32)
self.mask = tf.reshape(self.mask, [-1, natoms[1]])

Expand Down
1 change: 1 addition & 0 deletions deepmd/entrypoints/freeze.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ def _make_node_names(
"spin_attr/ntypes_spin",
"fitting_attr/dfparam",
"fitting_attr/daparam",
"fitting_attr/aparam_nall",
]
elif model_type == "dos":
nodes += [
Expand Down
7 changes: 5 additions & 2 deletions deepmd/model/pairwise_dprc.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ def build(
with tf.variable_scope("fitting_attr" + suffix, reuse=reuse):
t_dfparam = tf.constant(0, name="dfparam", dtype=tf.int32)
t_daparam = tf.constant(1, name="daparam", dtype=tf.int32)
t_aparam_nall = tf.constant(True, name="aparam_nall", dtype=tf.bool)
with tf.variable_scope("descrpt_attr" + suffix, reuse=reuse):
t_ntypes = tf.constant(self.ntypes, name="ntypes", dtype=tf.int32)
t_rcut = tf.constant(
Expand Down Expand Up @@ -222,12 +223,14 @@ def build(
virial = virial_qm + virial_qmmm
virial = tf.identity(virial, name="o_virial" + suffix)

backward_qm_map_nloc = tf.slice(backward_qm_map, [0, 0], [-1, natoms[0]])
backward_qmmm_map_nloc = tf.slice(backward_qmmm_map, [0, 0], [-1, natoms[0]])
atom_ener_qm = gather_placeholder(
qm_dict["atom_ener"], backward_qm_map, placeholder=0.0
qm_dict["atom_ener"], backward_qm_map_nloc, placeholder=0.0
)
atom_ener_qmmm = tf.math.segment_sum(
gather_placeholder(
qmmm_dict["atom_ener"], backward_qmmm_map, placeholder=0.0
qmmm_dict["atom_ener"], backward_qmmm_map_nloc, placeholder=0.0
),
qmmm_frame_idx,
)
Expand Down
23 changes: 23 additions & 0 deletions source/api_c/include/c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#else
// for C99
#include <stdbool.h>
#endif

/**
Expand Down Expand Up @@ -717,6 +720,16 @@ int DP_DeepPotGetDimFParam(DP_DeepPot* dp);
*/
int DP_DeepPotGetDimAParam(DP_DeepPot* dp);

/**
 * @brief Check whether the atomic dimension of atomic parameters is nall
 * instead of nloc.
 *
 * @param[in] dp The DP to use.
 * @return true if the atomic dimension of atomic parameters is nall;
 * false if it is nloc.
 */
bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp);

/**
* @brief Get the type map of a DP.
* @param[in] dp The DP to use.
Expand All @@ -737,6 +750,16 @@ int DP_DeepPotModelDeviGetDimFParam(DP_DeepPotModelDevi* dp);
*/
int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp);

/**
 * @brief Check whether the atomic dimension of atomic parameters is nall
 * instead of nloc.
 *
 * @param[in] dp The DP model deviation to use.
 * @return true if the atomic dimension of atomic parameters is nall;
 * false if it is nloc.
 */
bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp);

/**
* @brief The deep tensor.
**/
Expand Down
2 changes: 2 additions & 0 deletions source/api_c/include/c_api_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ struct DP_DeepPot {
std::string exception;
int dfparam;
int daparam;
bool aparam_nall;
};

struct DP_DeepPotModelDevi {
Expand All @@ -51,6 +52,7 @@ struct DP_DeepPotModelDevi {
std::string exception;
int dfparam;
int daparam;
bool aparam_nall;
};

struct DP_DeepTensor {
Expand Down
32 changes: 24 additions & 8 deletions source/api_c/include/deepmd.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -597,6 +597,7 @@ class DeepPot {
DP_CHECK_OK(DP_DeepPotCheckOK, dp);
dfparam = DP_DeepPotGetDimFParam(dp);
daparam = DP_DeepPotGetDimAParam(dp);
aparam_nall = DP_DeepPotIsAParamNAll(dp);
};

/**
Expand Down Expand Up @@ -771,9 +772,12 @@ class DeepPot {
VALUETYPE *force_ = &force[0];
VALUETYPE *virial_ = &virial[0];
std::vector<VALUETYPE> fparam_, aparam_;
validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam);
validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)),
fparam, aparam);
tile_fparam_aparam(fparam_, nframes, dfparam, fparam);
tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam);
tile_fparam_aparam(aparam_, nframes,
(aparam_nall ? natoms : (natoms - nghost)) * daparam,
aparam);
const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr;
const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr;

Expand Down Expand Up @@ -842,9 +846,12 @@ class DeepPot {
VALUETYPE *atomic_ener_ = &atom_energy[0];
VALUETYPE *atomic_virial_ = &atom_virial[0];
std::vector<VALUETYPE> fparam_, aparam_;
validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam);
validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)),
fparam, aparam);
tile_fparam_aparam(fparam_, nframes, dfparam, fparam);
tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam);
tile_fparam_aparam(aparam_, nframes,
(aparam_nall ? natoms : (natoms - nghost)) * daparam,
aparam);
const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr;
const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr;

Expand Down Expand Up @@ -1039,6 +1046,7 @@ class DeepPot {
DP_DeepPot *dp;
int dfparam;
int daparam;
bool aparam_nall;
template <typename VALUETYPE>
void validate_fparam_aparam(const int &nframes,
const int &nloc,
Expand Down Expand Up @@ -1128,6 +1136,7 @@ class DeepPotModelDevi {
numb_models = models.size();
dfparam = DP_DeepPotModelDeviGetDimFParam(dp);
daparam = DP_DeepPotModelDeviGetDimAParam(dp);
aparam_nall = DP_DeepPotModelDeviIsAParamNAll(dp);
};

/**
Expand Down Expand Up @@ -1173,9 +1182,12 @@ class DeepPotModelDevi {
VALUETYPE *force_ = &force_flat[0];
VALUETYPE *virial_ = &virial_flat[0];
std::vector<VALUETYPE> fparam_, aparam_;
validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam);
validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)),
fparam, aparam);
tile_fparam_aparam(fparam_, nframes, dfparam, fparam);
tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam);
tile_fparam_aparam(aparam_, nframes,
(aparam_nall ? natoms : (natoms - nghost)) * daparam,
aparam);
const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr;
const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr;

Expand Down Expand Up @@ -1250,9 +1262,12 @@ class DeepPotModelDevi {
VALUETYPE *atomic_ener_ = &atom_energy_flat[0];
VALUETYPE *atomic_virial_ = &atom_virial_flat[0];
std::vector<VALUETYPE> fparam_, aparam_;
validate_fparam_aparam(nframes, natoms - nghost, fparam, aparam);
validate_fparam_aparam(nframes, (aparam_nall ? natoms : (natoms - nghost)),
fparam, aparam);
tile_fparam_aparam(fparam_, nframes, dfparam, fparam);
tile_fparam_aparam(aparam_, nframes, (natoms - nghost) * daparam, aparam);
tile_fparam_aparam(aparam_, nframes,
(aparam_nall ? natoms : (natoms - nghost)) * daparam,
aparam);
const VALUETYPE *fparam__ = !fparam_.empty() ? &fparam_[0] : nullptr;
const VALUETYPE *aparam__ = !aparam_.empty() ? &aparam_[0] : nullptr;

Expand Down Expand Up @@ -1448,6 +1463,7 @@ class DeepPotModelDevi {
int numb_models;
int dfparam;
int daparam;
bool aparam_nall;
template <typename VALUETYPE>
void validate_fparam_aparam(const int &nframes,
const int &nloc,
Expand Down
17 changes: 15 additions & 2 deletions source/api_c/src/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ DP_DeepPot::DP_DeepPot() {}
// Wrap an existing deepmd::DeepPot and cache the parameter dimensions and the
// aparam_nall flag, so the C API getters can answer without calling back into
// the C++ object.
DP_DeepPot::DP_DeepPot(deepmd::DeepPot& dp) : dp(dp) {
  dfparam = dp.dim_fparam();
  daparam = dp.dim_aparam();
  aparam_nall = dp.is_aparam_nall();
}

DP_DeepPot* DP_NewDeepPot(const char* c_model) {
Expand Down Expand Up @@ -65,6 +66,7 @@ DP_DeepPotModelDevi::DP_DeepPotModelDevi(deepmd::DeepPotModelDevi& dp)
: dp(dp) {
dfparam = dp.dim_fparam();
daparam = dp.dim_aparam();
aparam_nall = dp.is_aparam_nall();
}

DP_DeepPotModelDevi* DP_NewDeepPotModelDevi(const char** c_models,
Expand Down Expand Up @@ -249,7 +251,10 @@ inline void DP_DeepPotComputeNList_variant(DP_DeepPot* dp,
}
std::vector<VALUETYPE> aparam_;
if (aparam) {
aparam_.assign(aparam, aparam + nframes * (natoms - nghost) * dp->daparam);
aparam_.assign(aparam,
aparam + nframes *
(dp->aparam_nall ? natoms : (natoms - nghost)) *
dp->daparam);
}
std::vector<double> e;
std::vector<VALUETYPE> f, v, ae, av;
Expand Down Expand Up @@ -433,7 +438,9 @@ void DP_DeepPotModelDeviComputeNList_variant(DP_DeepPotModelDevi* dp,
}
std::vector<VALUETYPE> aparam_;
if (aparam) {
aparam_.assign(aparam, aparam + (natoms - nghost) * dp->daparam);
aparam_.assign(
aparam,
aparam + (dp->aparam_nall ? natoms : (natoms - nghost)) * dp->daparam);
}
// different from DeepPot
std::vector<double> e;
Expand Down Expand Up @@ -1031,6 +1038,8 @@ int DP_DeepPotGetDimFParam(DP_DeepPot* dp) { return dp->dfparam; }

// Return the cached atomic-parameter dimension (daparam) of the model.
int DP_DeepPotGetDimAParam(DP_DeepPot* dp) { return dp->daparam; }

// Return the cached flag: true if aparam's atom dimension is nall, not nloc.
bool DP_DeepPotIsAParamNAll(DP_DeepPot* dp) { return dp->aparam_nall; }

// Return the last recorded exception message (empty string if none).
// The returned C string is owned by the library, not the caller.
const char* DP_DeepPotCheckOK(DP_DeepPot* dp) {
  return string_to_char(dp->exception);
}
Expand Down Expand Up @@ -1133,6 +1142,10 @@ int DP_DeepPotModelDeviGetDimAParam(DP_DeepPotModelDevi* dp) {
return dp->daparam;
}

// Return the cached flag: true if aparam's atom dimension is nall, not nloc.
bool DP_DeepPotModelDeviIsAParamNAll(DP_DeepPotModelDevi* dp) {
  return dp->aparam_nall;
}

// Return the last recorded exception message (empty string if none).
// The returned C string is owned by the library, not the caller.
const char* DP_DeepPotModelDeviCheckOK(DP_DeepPotModelDevi* dp) {
  return string_to_char(dp->exception);
}
Expand Down
54 changes: 54 additions & 0 deletions source/api_c/tests/test_deeppot_a_fparam_aparam.cc
Original file line number Diff line number Diff line change
Expand Up @@ -380,3 +380,57 @@ TYPED_TEST(TestInferDeepPotAFParamAParam, cpu_lmp_nlist_2rc) {
EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON);
}
}

// Fixture for checking that the hpp interface accepts atomic parameters
// (aparam) whose atom dimension is nall (local + ghost atoms) rather than
// nloc, as required by the pairwise DPRc model (fitting_attr/aparam_nall).
template <class VALUETYPE>
class TestInferAParamNAll : public ::testing::Test {
 protected:
  // 6 atoms in a 13 x 13 x 13 box.
  std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
                                  00.25, 3.32, 1.68, 3.36, 3.00, 1.81,
                                  3.51, 2.51, 2.60, 4.27, 3.22, 1.56};
  std::vector<int> atype = {0, 0, 0, 0, 0, 0};
  std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
  int natoms = 6;

  deepmd::hpp::DeepPot dp;

  void SetUp() override {
    // Convert the text-format model shipped with the tests to a frozen
    // graph, then load it.
    std::string file_name = "../../tests/infer/pairwise_dprc.pbtxt";
    deepmd::hpp::convert_pbtxt_to_pb(file_name, "pairwise_dprc.pb");
    dp.init("pairwise_dprc.pb");
  };

  // Bug fix: remove the file this fixture actually creates. The original
  // removed "fparam_aparam.pb" (copied from another fixture), leaking the
  // generated "pairwise_dprc.pb".
  void TearDown() override { remove("pairwise_dprc.pb"); };
};

// Instantiate the fixture for each supported floating-point value type.
TYPED_TEST_SUITE(TestInferAParamNAll, ValueTypes);

// Run the neighbor-list compute path with an aparam vector sized to nall
// (local + ghost atoms) instead of nloc. Only the acceptance of the call is
// checked; the numerical results are not inspected.
TYPED_TEST(TestInferAParamNAll, cpu_lmp_nlist) {
  using VALUETYPE = TypeParam;
  deepmd::hpp::DeepPot& model = this->dp;
  std::vector<VALUETYPE>& xyz = this->coord;
  std::vector<int>& types = this->atype;
  std::vector<VALUETYPE>& cell = this->box;
  int& natoms = this->natoms;

  const float rcut = model.cutoff();
  const int nloc = xyz.size() / 3;

  // Build the extended (copied) system and its neighbor list.
  std::vector<VALUETYPE> xyz_ext;
  std::vector<int> types_ext, fwd_map;
  std::vector<std::vector<int> > neighbors;
  _build_nlist<VALUETYPE>(neighbors, xyz_ext, types_ext, fwd_map, xyz, types,
                          cell, rcut);
  const int nall = xyz_ext.size() / 3;

  // One aparam entry per atom, ghosts included; mark only atom 0 as QM.
  // (For some reason, marking all atoms as QM does not work.)
  std::vector<VALUETYPE> aparam_ext(nall, 0);
  aparam_ext[0] = 1;

  std::vector<int> ilist(nloc);
  std::vector<int> numneigh(nloc);
  std::vector<int*> firstneigh(nloc);
  deepmd::hpp::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
  convert_nlist(inlist, neighbors);

  double ener;
  std::vector<VALUETYPE> force_, virial;
  model.compute(ener, force_, virial, xyz_ext, types_ext, cell, nall - nloc,
                inlist, 0, std::vector<VALUETYPE>(), aparam_ext);
  // Just check that the interface accepts an nall-sized aparam.
}
21 changes: 21 additions & 0 deletions source/api_cc/include/DeepPot.h
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,16 @@ class DeepPot {
**/
void get_type_map(std::string& type_map);

/**
 * @brief Get whether the atom dimension of aparam is nall instead of nloc.
 * @return true if the atom dimension of aparam is nall, false if it is
 * nloc.
 **/
bool is_aparam_nall() const {
  assert(inited);
  return aparam_nall;
};

private:
tensorflow::Session* session;
int num_intra_nthreads, num_inter_nthreads;
Expand All @@ -309,6 +319,7 @@ class DeepPot {
int ntypes_spin;
int dfparam;
int daparam;
bool aparam_nall;
/**
* @brief Validate the size of frame and atomic parameters.
* @param[in] nframes The number of frames.
Expand Down Expand Up @@ -572,6 +583,15 @@ class DeepPotModelDevi {
void compute_relative_std_f(std::vector<VALUETYPE>& std,
const std::vector<VALUETYPE>& avg,
const VALUETYPE eps);
/**
 * @brief Get whether the atom dimension of aparam is nall instead of nloc.
 * @return true if the atom dimension of aparam is nall, false if it is
 * nloc.
 **/
bool is_aparam_nall() const {
  assert(inited);
  return aparam_nall;
};

private:
unsigned numb_models;
Expand All @@ -592,6 +612,7 @@ class DeepPotModelDevi {
int ntypes_spin;
int dfparam;
int daparam;
bool aparam_nall;
template <typename VALUETYPE>
void validate_fparam_aparam(const int& nloc,
const std::vector<VALUETYPE>& fparam,
Expand Down

0 comments on commit 0d5737f

Please sign in to comment.