Skip to content

Commit

Permalink
Merge pull request #594 from amcadmus/devel
Browse files Browse the repository at this point in the history
Add model deviation of virial
  • Loading branch information
amcadmus committed May 8, 2021
2 parents 850169f + 6218faf commit 75d7ca1
Show file tree
Hide file tree
Showing 5 changed files with 231 additions and 100 deletions.
10 changes: 10 additions & 0 deletions source/api_cc/include/DeepPot.h
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,16 @@ class DeepPotModelDevi
const std::vector<VALUETYPE > & all_energy);
void compute_avg (std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> > & xx);
void compute_std (
std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> >& xx,
const int & stride);
void compute_relative_std (
std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const VALUETYPE eps,
const int & stride);
void compute_std_e (std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> >& xx);
Expand Down
102 changes: 44 additions & 58 deletions source/api_cc/src/DeepPot.cc
Original file line number Diff line number Diff line change
Expand Up @@ -838,83 +838,79 @@ compute_avg (std::vector<VALUETYPE> & avg,
}


// void
// DeepPotModelDevi::
// compute_std (VALUETYPE & std,
// const VALUETYPE & avg,
// const vector<VALUETYPE >& xx)
// {
// std = 0;
// assert(xx.size() == numb_models);
// for (unsigned jj = 0; jj < xx.size(); ++jj){
// std += (xx[jj] - avg) * (xx[jj] - avg);
// }
// std = sqrt(std / VALUETYPE(numb_models));
// // std = sqrt(std / VALUETYPE(numb_models-));
// }

void
DeepPotModelDevi::
compute_std_e (std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> >&xx)
compute_std (
std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> >&xx,
const int & stride)
{
assert (xx.size() == numb_models);
if (numb_models == 0) return;

unsigned ndof = avg.size();
unsigned nloc = ndof;
assert (nloc == ndof);
unsigned nloc = ndof / stride;
assert (nloc * stride == ndof);

std.resize(nloc);
fill (std.begin(), std.end(), VALUETYPE(0.));

for (unsigned ii = 0; ii < numb_models; ++ii) {
for (unsigned jj = 0 ; jj < nloc; ++jj){
const VALUETYPE * tmp_f = &(xx[ii][jj]);
const VALUETYPE * tmp_avg = &(avg[jj]);
VALUETYPE vdiff = xx[ii][jj] - avg[jj];
std[jj] += vdiff * vdiff;
const VALUETYPE * tmp_f = &(xx[ii][jj*stride]);
const VALUETYPE * tmp_avg = &(avg[jj*stride]);
for (unsigned dd = 0; dd < stride; ++dd){
VALUETYPE vdiff = tmp_f[dd] - tmp_avg[dd];
std[jj] += vdiff * vdiff;
}
}
}

for (unsigned jj = 0; jj < nloc; ++jj){
std[jj] = sqrt(std[jj] / VALUETYPE(numb_models));
// std[jj] = sqrt(std[jj] / VALUETYPE(numb_models-1));
}
}


// Model deviation (std over the ensemble) of per-atom energies.
// Energies are scalar per atom, hence stride 1.
void
DeepPotModelDevi::
compute_std_e (std::vector<VALUETYPE> & std,
const std::vector<VALUETYPE> & avg,
const std::vector<std::vector<VALUETYPE> >&xx)
{
compute_std(std, avg, xx, 1);
}

// Model deviation (std over the ensemble) of atomic forces.
// Each force has 3 components (x, y, z) pooled per atom, hence stride 3.
// Input validation (xx.size() == numb_models, empty-ensemble early return)
// is performed inside compute_std.
void
DeepPotModelDevi::
compute_std_f (std::vector<VALUETYPE> & std,
    const std::vector<VALUETYPE> & avg,
    const std::vector<std::vector<VALUETYPE> >&xx)
{
  compute_std(std, avg, xx, 3);
}

// Convert an absolute model deviation into a relative one, in place:
//   std[i] /= |avg_i| + eps
// where |avg_i| is the Euclidean norm of the `stride` average components
// belonging to entry i.  `eps` guards against division by zero when the
// average vector vanishes.
//
// @param std    in/out: absolute std on entry (one entry per group of
//               `stride` components), relative std on exit
// @param avg    ensemble average, std.size() * stride entries
// @param eps    regularization added to the norm in the denominator
// @param stride number of avg components per std entry (3 for forces)
void
DeepPotModelDevi::
compute_relative_std (
    std::vector<VALUETYPE> &std,
    const std::vector<VALUETYPE> &avg,
    const VALUETYPE eps,
    const int & stride)
{
  unsigned ndof = avg.size();
  unsigned nloc = std.size();
  assert (nloc * stride == ndof);

  for (unsigned ii = 0; ii < nloc; ++ii){
    const VALUETYPE * tmp_avg = &(avg[ii*stride]);
    // Euclidean norm of the average over this entry's components
    VALUETYPE f_norm = 0.0;
    for (unsigned dd = 0; dd < stride; ++dd){
      f_norm += tmp_avg[dd] * tmp_avg[dd];
    }
    f_norm = sqrt(f_norm);
    // relative std = std / (|avg| + eps)
    std[ii] /= f_norm + eps;
  }
}

Expand All @@ -924,16 +920,6 @@ compute_relative_std_f (std::vector<VALUETYPE> &std,
const std::vector<VALUETYPE> &avg,
const VALUETYPE eps)
{
unsigned nloc = std.size();
for (unsigned ii = 0; ii < nloc; ++ii){
const VALUETYPE * tmp_avg = &(avg[ii*3]);
VALUETYPE vdiff[3];
vdiff[0] = tmp_avg[0];
vdiff[1] = tmp_avg[1];
vdiff[2] = tmp_avg[2];
VALUETYPE f_norm = sqrt(deepmd::dot3(vdiff, vdiff));
// relative std = std/(abs(f)+eps)
std[ii] /= f_norm + eps;
}
compute_relative_std(std, avg, eps, 3);
}

142 changes: 108 additions & 34 deletions source/api_cc/tests/test_deeppot_model_devi.cc
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,6 @@ TEST_F(TestInferDeepPotModeDevi, attrs)
EXPECT_EQ(dp1.dim_aparam(), dp_md.dim_aparam());
}


TEST_F(TestInferDeepPotModeDevi, cpu_lmp_list)
{
float rc = dp_md.cutoff();
Expand All @@ -105,8 +104,7 @@ TEST_F(TestInferDeepPotModeDevi, cpu_lmp_list)
for(int kk = 0; kk < nmodel; ++kk){
_fold_back(fdir[kk], fdir_[kk], mapping, nloc, nall, 3);
_fold_back(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
}

}

EXPECT_EQ(edir.size(), emd.size());
EXPECT_EQ(fdir.size(), fmd.size());
Expand All @@ -124,37 +122,6 @@ TEST_F(TestInferDeepPotModeDevi, cpu_lmp_list)
EXPECT_LT(fabs(vdir[kk][ii] - vmd[kk][ii]), 1e-10);
}
}

std::vector<double > avg_f, std_f;
dp_md.compute_avg(avg_f, fmd);
dp_md.compute_std_f(std_f, avg_f, fmd);

// manual compute std f
std::vector<double > manual_std_f(nloc);
EXPECT_EQ(fmd[0].size(), nloc * 3);
for(int ii = 0; ii < nloc; ++ii){
std::vector<double > avg_f(3, 0.0);
for(int dd = 0; dd < 3; ++dd){
for(int kk = 0; kk < nmodel; ++kk){
avg_f[dd] += fmd[kk][ii*3+dd];
}
avg_f[dd] /= (nmodel) * 1.0;
}
double std = 0.;
for(int kk = 0; kk < nmodel; ++kk){
for(int dd = 0; dd < 3; ++dd){
double tmp = fmd[kk][ii*3+dd] - avg_f[dd];
std += tmp * tmp;
}
}
std /= nmodel * 1.0;
manual_std_f[ii] = sqrt(std);
}

EXPECT_EQ(manual_std_f.size(), std_f.size());
for(int ii = 0; ii < std_f.size(); ++ii){
EXPECT_LT(fabs(manual_std_f[ii] - std_f[ii]), 1e-10);
}
}


Expand Down Expand Up @@ -214,3 +181,110 @@ TEST_F(TestInferDeepPotModeDevi, cpu_lmp_list_atomic)
}
}


// Checks that DeepPotModelDevi's std helpers (compute_std_e, compute_std_f,
// compute_relative_std_f) agree with a manual reference computation on a
// 2-model ensemble evaluated through a LAMMPS-style neighbor list.
TEST_F(TestInferDeepPotModeDevi, cpu_lmp_list_std)
{
// build the periodic-image copies and neighbor list for the test system
float rc = dp_md.cutoff();
int nloc = coord.size() / 3;
std::vector<double> coord_cpy;
std::vector<int> atype_cpy, mapping;
std::vector<std::vector<int > > nlist_data;
_build_nlist(nlist_data, coord_cpy, atype_cpy, mapping,
coord, atype, box, rc);
int nall = coord_cpy.size() / 3;
std::vector<int> ilist(nloc), numneigh(nloc);
std::vector<int*> firstneigh(nloc);
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
convert_nlist(inlist, nlist_data);

// evaluate the two individual models and the model-deviation wrapper;
// atomic energies/virials (aemd_, avmd_) come from the multi-model compute
int nmodel = 2;
std::vector<double > edir(nmodel), emd;
std::vector<std::vector<double> > fdir_(nmodel), fdir(nmodel), vdir(nmodel), fmd_, fmd(nmodel), vmd;
std::vector<std::vector<double> > aemd(nmodel), aemd_, avmd(nmodel), avmd_;
dp0.compute(edir[0], fdir_[0], vdir[0], coord_cpy, atype_cpy, box, nall-nloc, inlist, 0);
dp1.compute(edir[1], fdir_[1], vdir[1], coord_cpy, atype_cpy, box, nall-nloc, inlist, 0);
dp_md.compute(emd, fmd_, vmd, aemd_, avmd_, coord_cpy, atype_cpy, box, nall-nloc, inlist, 0);
// fold ghost-atom contributions back onto the nloc real atoms
// (stride 3 for forces, 9 for per-atom virials), and keep only the
// real-atom slice of the atomic energies
for(int kk = 0; kk < nmodel; ++kk){
_fold_back(fdir[kk], fdir_[kk], mapping, nloc, nall, 3);
_fold_back(fmd[kk], fmd_[kk], mapping, nloc, nall, 3);
_fold_back(avmd[kk], avmd_[kk], mapping, nloc, nall, 9);
aemd[kk].resize(nloc);
for(int ii = 0; ii < nloc; ++ii){
aemd[kk][ii] = aemd_[kk][ii];
}
}

// dp compute std e
std::vector<double > avg_e, std_e;
dp_md.compute_avg(avg_e, aemd);
dp_md.compute_std_e(std_e, avg_e, aemd);

// manual compute std e: per-atom mean and population std over the ensemble
std::vector<double > manual_avg_e(nloc);
std::vector<double > manual_std_e(nloc);
for(int ii = 0; ii < nloc; ++ii){
double avg_e(0.0);
for(int kk = 0; kk < nmodel; ++kk){
avg_e += aemd[kk][ii];
}
avg_e /= nmodel;
manual_avg_e[ii] = avg_e;
double std = 0;
for (int kk = 0; kk < nmodel; ++kk){
std += (aemd[kk][ii] - avg_e) * (aemd[kk][ii] - avg_e);
}
// population std: divide by nmodel, matching compute_std
std = sqrt(std / nmodel);
manual_std_e[ii] = std;
}
EXPECT_EQ(manual_std_e.size(), std_e.size());
for(int ii = 0; ii < std_e.size(); ++ii){
EXPECT_LT(fabs(manual_avg_e[ii] - avg_e[ii]), 1e-10);
EXPECT_LT(fabs(manual_std_e[ii] - std_e[ii]), 1e-10);
}

// dp compute std f
std::vector<double > avg_f, std_f;
dp_md.compute_avg(avg_f, fmd);
dp_md.compute_std_f(std_f, avg_f, fmd);

// manual compute std f: pool the squared deviations of all 3 force
// components per atom, and also build the relative std reference
std::vector<double > manual_std_f(nloc);
std::vector<double > manual_rel_std_f(nloc);
double eps = 0.2;
EXPECT_EQ(fmd[0].size(), nloc * 3);
for(int ii = 0; ii < nloc; ++ii){
std::vector<double > avg_f(3, 0.0);
for(int dd = 0; dd < 3; ++dd){
for(int kk = 0; kk < nmodel; ++kk){
avg_f[dd] += fmd[kk][ii*3+dd];
}
avg_f[dd] /= (nmodel) * 1.0;
}
double std = 0.;
for(int kk = 0; kk < nmodel; ++kk){
for(int dd = 0; dd < 3; ++dd){
double tmp = fmd[kk][ii*3+dd] - avg_f[dd];
std += tmp * tmp;
}
}
// |avg force| used as the denominator of the relative std
double f_norm = 0;
for (int dd = 0; dd < 3; ++dd){
f_norm += avg_f[dd] * avg_f[dd];
}
f_norm = sqrt(f_norm);
std /= nmodel * 1.0;
manual_std_f[ii] = sqrt(std);
manual_rel_std_f[ii] = manual_std_f[ii] / (f_norm + eps);
}

EXPECT_EQ(manual_std_f.size(), std_f.size());
for(int ii = 0; ii < std_f.size(); ++ii){
EXPECT_LT(fabs(manual_std_f[ii] - std_f[ii]), 1e-10);
}
// compute_relative_std_f rescales std_f in place
dp_md.compute_relative_std_f(std_f, avg_f, eps);
EXPECT_EQ(manual_std_f.size(), std_f.size());
for(int ii = 0; ii < std_f.size(); ++ii){
EXPECT_LT(fabs(manual_rel_std_f[ii] - std_f[ii]), 1e-10);
}
}

0 comments on commit 75d7ca1

Please sign in to comment.