
Merge pull request #77 from deepmodeling/devel
merge from devel
njzjz committed Feb 10, 2022
2 parents 1f05364 + 0d8fe0a commit 30f8e7c
Showing 42 changed files with 1,566 additions and 72 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build_wheel.yml
@@ -25,7 +25,7 @@ jobs:
- name: Build wheels
env:
- CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-*"
+ CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-* cp310-*"
CIBW_MANYLINUX_X86_64_IMAGE: ghcr.io/deepmodeling/manylinux2010_x86_64_tensorflow
CIBW_BEFORE_BUILD: pip install tensorflow
CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux*"
4 changes: 2 additions & 2 deletions .github/workflows/test_python.yml
@@ -42,10 +42,10 @@ jobs:
- python: 3.7
gcc: 8
tf:
- - python: 3.8
+ - python: "3.10"
gcc: 5
tf:
- - python: 3.8
+ - python: "3.10"
gcc: 8
tf:

2 changes: 2 additions & 0 deletions .gitignore
@@ -29,6 +29,8 @@ _templates
API_CC
doc/api_py/
dp/
+ dp_test/
+ dp_test_cc/
build_lammps/
.idea/
build_tests/
4 changes: 4 additions & 0 deletions .readthedocs.yml
@@ -1,4 +1,8 @@
version: 2
+ build:
+ os: ubuntu-20.04
+ tools:
+ python: mambaforge-4.10
conda:
environment: doc/environment.yml
formats: all
1 change: 1 addition & 0 deletions README.md
@@ -97,6 +97,7 @@ A full [document](doc/train/train-input-auto.rst) on options in the training inp
- [Fit energy](doc/model/train-energy.md)
- [Fit `tensor` like `Dipole` and `Polarizability`](doc/model/train-fitting-tensor.md)
- [Train a Deep Potential model using `type embedding` approach](doc/model/train-se-e2-a-tebd.md)
+ - [Deep potential long-range](doc/model/dplr.md)
- [Training](doc/train/index.md)
- [Training a model](doc/train/training.md)
- [Advanced options](doc/train/training-advanced.md)
6 changes: 1 addition & 5 deletions deepmd/common.py
@@ -538,11 +538,6 @@ def cast_precision(func: Callable) -> Callable:
If it does not match (e.g. it is an integer), the decorator
will do nothing on it.
- Parameters
- ----------
- precision : tf.DType
- Tensor data type that casts to
Returns
-------
Callable
@@ -560,6 +555,7 @@ def cast_precision(func: Callable) -> Callable:
... def f(x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
... return x ** 2 + y
"""
+ @wraps(func)
def wrapper(self, *args, **kwargs):
# only convert tensors
returned_tensor = func(
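The only functional change in this file is the new `@wraps(func)` line on the wrapper. As a rough illustration of why it matters (a minimal sketch, not the actual `deepmd.common.cast_precision` implementation), `functools.wraps` keeps the decorated method's metadata intact:

```python
# Minimal sketch: without @wraps, the decorated method would report the
# wrapper's __name__ and __doc__ instead of its own.
from functools import wraps

def cast_precision(func):
    @wraps(func)  # preserve func.__name__, func.__doc__, signature metadata
    def wrapper(self, *args, **kwargs):
        # the real decorator casts tensor arguments/returns around this call
        return func(self, *args, **kwargs)
    return wrapper

class Demo:
    @cast_precision
    def f(self, x):
        """Square x."""
        return x ** 2

print(Demo.f.__name__, Demo.f.__doc__)  # f Square x.  (not wrapper / None)
```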
2 changes: 1 addition & 1 deletion deepmd/entrypoints/main.py
@@ -361,7 +361,7 @@ def parse_args(args: Optional[List[str]] = None):
"--system",
default=".",
type=str,
help="The system directory, not support recursive detection.",
help="The system directory. Recursively detect systems in this directory.",
)
parser_model_devi.add_argument(
"-S", "--set-prefix", default="set", type=str, help="The set prefix"
16 changes: 10 additions & 6 deletions deepmd/fit/ener.py
@@ -332,7 +332,7 @@ def _build_lower(
def build (self,
inputs : tf.Tensor,
natoms : tf.Tensor,
- input_dict : dict = {},
+ input_dict : dict = None,
reuse : bool = None,
suffix : str = '',
) -> tf.Tensor:
@@ -362,6 +362,8 @@ def build (self,
ener
The system energy
"""
+ if input_dict is None:
+ input_dict = {}
bias_atom_e = self.bias_atom_e
if self.numb_fparam > 0 and ( self.fparam_avg is None or self.fparam_inv_std is None ):
raise RuntimeError('No data stat result. one should do data statisitic, before build')
@@ -401,7 +403,12 @@ def build (self,
inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
if len(self.atom_ener):
# only for atom_ener
- inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision)
+ nframes = input_dict.get('nframes')
+ if nframes is not None:
+ # like inputs, but we don't want to add a dependency on inputs
+ inputs_zero = tf.zeros((nframes, self.dim_descrpt * natoms[0]), dtype=self.fitting_precision)
+ else:
+ inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision)


if bias_atom_e is not None :
@@ -419,10 +426,7 @@ def build (self,
aparam = (aparam - t_aparam_avg) * t_aparam_istd
aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])

- if input_dict is not None:
- type_embedding = input_dict.get('type_embedding', None)
- else:
- type_embedding = None
+ type_embedding = input_dict.get('type_embedding', None)
if type_embedding is not None:
atype_embed = embed_atom_type(self.ntypes, natoms, type_embedding)
atype_embed = tf.tile(atype_embed,[tf.shape(inputs)[0],1])
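Two things change in this file: the mutable `{}` default for `input_dict` becomes `None` (and is initialized inside `build`), and `inputs_zero` can be built from `input_dict['nframes']` so the zero tensor no longer depends on `inputs` in the graph. The first point is the classic mutable-default pitfall; a minimal sketch with illustrative names (not the deepmd-kit API):

```python
# A default dict is created once and shared across calls, so entries written
# into it (e.g. input_dict['nframes'] set by the model code) would leak
# between otherwise independent build() calls.
def build_bad(input_dict={}):
    input_dict["calls"] = input_dict.get("calls", 0) + 1
    return input_dict["calls"]

def build_good(input_dict=None):
    if input_dict is None:
        input_dict = {}  # fresh dict on every call
    input_dict["calls"] = input_dict.get("calls", 0) + 1
    return input_dict["calls"]

print(build_bad(), build_bad())    # 1 2  <- state carried over between calls
print(build_good(), build_good())  # 1 1
```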
60 changes: 34 additions & 26 deletions deepmd/infer/model_devi.py
@@ -2,6 +2,7 @@
from .deep_pot import DeepPot
from ..utils.data import DeepmdData
from ..utils.batch_size import AutoBatchSize
+ from deepmd.common import expand_sys_str


def calc_model_devi_f(fs: np.ndarray):
@@ -56,11 +57,12 @@ def write_model_devi_out(devi: np.ndarray, fname: str):
header = "%10s" % "step"
for item in 'vf':
header += "%19s%19s%19s" % (f"max_devi_{item}", f"min_devi_{item}", f"avg_devi_{item}")
- np.savetxt(fname,
- devi,
- fmt=['%12d'] + ['%19.6e' for _ in range(6)],
- delimiter='',
- header=header)
+ with open(fname, "ab") as fp:
+ np.savetxt(fp,
+ devi,
+ fmt=['%12d'] + ['%19.6e' for _ in range(6)],
+ delimiter='',
+ header=header)
return devi

def _check_tmaps(tmaps, ref_tmap=None):
@@ -185,25 +187,31 @@ def make_model_devi(
tmap = tmaps[0]
else:
raise RuntimeError("The models does not have the same type map.")

- # create data-system
- dp_data = DeepmdData(system, set_prefix, shuffle_test=False, type_map=tmap)
- if dp_data.pbc:
- nopbc = False
- else:
- nopbc = True
-
- data_sets = [dp_data._load_set(set_name) for set_name in dp_data.dirs]
- nframes_tot = 0
- devis = []
- for data in data_sets:
- coord = data["coord"]
- box = data["box"]
- atype = data["type"][0]
- devi = calc_model_devi(coord, box, atype, dp_models, nopbc=nopbc)
- nframes_tot += coord.shape[0]
- devis.append(devi)
- devis = np.vstack(devis)
- devis[:, 0] = np.arange(nframes_tot) * frequency
- write_model_devi_out(devis, output)
- return devis
+ all_sys = expand_sys_str(system)
+ if len(all_sys) == 0:
+ raise RuntimeError("Did not find valid system")
+ devis_coll = []
+ for system in all_sys:
+ # create data-system
+ dp_data = DeepmdData(system, set_prefix, shuffle_test=False, type_map=tmap)
+ if dp_data.pbc:
+ nopbc = False
+ else:
+ nopbc = True
+
+ data_sets = [dp_data._load_set(set_name) for set_name in dp_data.dirs]
+ nframes_tot = 0
+ devis = []
+ for data in data_sets:
+ coord = data["coord"]
+ box = data["box"]
+ atype = data["type"][0]
+ devi = calc_model_devi(coord, box, atype, dp_models, nopbc=nopbc)
+ nframes_tot += coord.shape[0]
+ devis.append(devi)
+ devis = np.vstack(devis)
+ devis[:, 0] = np.arange(nframes_tot) * frequency
+ write_model_devi_out(devis, output)
+ devis_coll.append(devis)
+ return devis_coll
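With this change `make_model_devi` no longer treats `system` as a single data directory: it expands the path into every system found beneath it, computes the deviation per system, and appends each block to the output file (which is why `write_model_devi_out` now opens the file in `"ab"` append mode). A hedged sketch of the expansion idea, where `expand_systems` is an illustrative stand-in rather than the real `deepmd.common.expand_sys_str`:

```python
# A deepmd system is a directory containing type.raw (plus set.* subdirs),
# so expanding a root path means collecting every such directory below it.
from pathlib import Path
from typing import List

def expand_systems(root: str) -> List[str]:
    matches = {p.parent for p in Path(root).rglob("type.raw")}
    return sorted(str(d) for d in matches)

# e.g. expand_systems("data/") might return ["data/sys1", "data/sys2", ...]
```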
2 changes: 1 addition & 1 deletion deepmd/loss/ener.py
@@ -129,7 +129,7 @@ def build (self,
return l2_loss, more_loss

def eval(self, sess, feed_dict, natoms):
- placeholder = tf.no_op()
+ placeholder = self.l2_l
run_data = [
self.l2_l,
self.l2_more['l2_ener_loss'] if self.has_e else placeholder,
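The filler used for absent loss components changes from `tf.no_op()` to an existing scalar tensor. The difference matters because fetching an op in `Session.run` returns `None`, which breaks numeric post-processing of the result list; a standalone sketch (TF1-style session via `compat.v1`, not the deepmd-kit loss code):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

l2 = tf.constant(1.5)
old_filler = tf.no_op()  # fetching an op yields None
new_filler = l2          # fetching a tensor yields a number

with tf.Session() as sess:
    print(sess.run([l2, old_filler]))  # [1.5, None]
    print(sess.run([l2, new_filler]))  # [1.5, 1.5]
```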
7 changes: 5 additions & 2 deletions deepmd/model/ener.py
@@ -117,7 +117,9 @@ def build (self,
frz_model = None,
suffix = '',
reuse = None):


+ if input_dict is None:
+ input_dict = {}
with tf.variable_scope('model_attr' + suffix, reuse = reuse) :
t_tmap = tf.constant(' '.join(self.type_map),
name = 'tmap',
@@ -144,6 +146,7 @@ def build (self,

coord = tf.reshape (coord_, [-1, natoms[1] * 3])
atype = tf.reshape (atype_, [-1, natoms[1]])
+ input_dict['nframes'] = tf.shape(coord)[0]

# type embedding if any
if self.typeebd is not None:
@@ -270,4 +273,4 @@ def build (self,

def _import_graph_def_from_frz_model(self, frz_model, feed_dict, return_elements):
graph, graph_def = load_graph_def(frz_model)
- return tf.import_graph_def(graph_def, input_map = feed_dict, return_elements = return_elements, name = "")
+ return tf.import_graph_def(graph_def, input_map = feed_dict, return_elements = return_elements, name = "")
7 changes: 6 additions & 1 deletion deepmd/train/trainer.py
@@ -282,7 +282,12 @@ def build (self,
))
self.type_map = data.get_type_map()
self.batch_size = data.get_batch_size()
- self.model.data_stat(data)
+ if self.run_opt.init_mode not in ('init_from_model', 'restart'):
+ # self.saver.restore (in self._init_session) will restore avg and std variables, so data_stat is useless
+ # currently init_from_frz_model does not restore data_stat variables
+ # TODO: restore avg and std in the init_from_frz_model mode
+ log.info("data stating... (this step may take long time)")
+ self.model.data_stat(data)

# config the init_frz_model command
if self.run_opt.init_mode == 'init_from_frz_model':
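The inserted comments carry the reasoning: when the run is initialized from a model checkpoint or restarted, `saver.restore` brings the avg/std variables back anyway, so the slow statistics pass over the training data can be skipped. Schematically (illustrative names, not the trainer's actual API):

```python
def maybe_data_stat(model, data, init_mode: str) -> None:
    # avg/std come back with the checkpoint in these modes, so recomputing
    # them from the training data would be wasted work
    if init_mode in ("init_from_model", "restart"):
        return
    model.data_stat(data)  # potentially slow pass over the training data
```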
6 changes: 3 additions & 3 deletions deepmd/utils/network.py
@@ -56,7 +56,7 @@ def one_layer(inputs,
w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
b = tf.cast(b, get_precision(mixed_prec['compute_prec']))

- hidden = tf.matmul(inputs, w) + b
+ hidden = tf.nn.bias_add(tf.matmul(inputs, w), b)
if activation_fn != None and use_timestep :
idt_initializer = tf.random_normal_initializer(
stddev=0.001,
@@ -196,7 +196,7 @@ def embedding_net(xx,
variable_summaries(w, 'matrix_'+str(ii)+name_suffix)

b = tf.get_variable('bias_'+str(ii)+name_suffix,
- [1, outputs_size[ii]],
+ [outputs_size[ii]],
precision,
b_initializer,
trainable = trainable)
@@ -206,7 +206,7 @@
xx = tf.cast(xx, get_precision(mixed_prec['compute_prec']))
w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
b = tf.cast(b, get_precision(mixed_prec['compute_prec']))
- hidden = tf.reshape(activation_fn(tf.matmul(xx, w) + b), [-1, outputs_size[ii]])
+ hidden = tf.reshape(activation_fn(tf.nn.bias_add(tf.matmul(xx, w), b)), [-1, outputs_size[ii]])
if resnet_dt :
idt_initializer = tf.random_normal_initializer(
stddev=0.001,
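Both `one_layer` and `embedding_net` switch from a broadcasting `+ b` to `tf.nn.bias_add`, and the embedding-net bias variable shape changes from `[1, outputs_size[ii]]` to `[outputs_size[ii]]` to match, since `bias_add` expects a 1-D bias whose length equals the last dimension of its input. A quick numerical check that the two forms agree (a standalone sketch, not the deepmd network code):

```python
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(4, 3).astype(np.float32))
w = tf.constant(np.random.rand(3, 5).astype(np.float32))
b = tf.constant(np.random.rand(5).astype(np.float32))  # 1-D bias, shape [5]

y_broadcast = tf.matmul(x, w) + b                # implicit broadcasting add
y_bias_add = tf.nn.bias_add(tf.matmul(x, w), b)  # explicit bias op

print(np.allclose(y_broadcast.numpy(), y_bias_add.numpy()))  # True
```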
2 changes: 1 addition & 1 deletion doc/data/data-conv.md
@@ -51,5 +51,5 @@ box.raw coord.raw energy.raw force.raw set.000 set.001 set.002 type.raw
```
It generates three sets `set.000`, `set.001` and `set.002`, with each set contains 2000 frames. One do not need to take care of the binary data files in each of the `set.*` directories. The path containing `set.*` and `type.raw` is called a *system*.

- If one needs to train a non-periodic system, an empty `nopbc` file should be put under the system directory. `box.raw` is not necessary is a non-periodic system.
+ If one needs to train a non-periodic system, an empty `nopbc` file should be put under the system directory. `box.raw` is not necessary in a non-periodic system.
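The corrected sentence documents a small but useful convention: a non-periodic system is marked by an empty `nopbc` file in the system directory, and `box.raw` can then be omitted. For example (hypothetical directory name, shown only to illustrate the marker file):

```python
from pathlib import Path

system_dir = Path("water_cluster")  # hypothetical system directory (holds type.raw, set.*)
system_dir.mkdir(exist_ok=True)
(system_dir / "nopbc").touch()      # empty marker file; box.raw is not required
```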

5 changes: 5 additions & 0 deletions doc/install/easy-install.md
@@ -53,3 +53,8 @@ To pull the GPU version:
```bash
docker pull ghcr.io/deepmodeling/deepmd-kit:2.0.0_cuda10.1_gpu
```

+ To pull the ROCm version:
+ ```bash
+ docker pull deepmodeling/dpmdkit-rocm:dp2.0.3-rocm4.5.2-tf2.6-lmp29Sep2021
+ ```