Commit 3ac8c4c: Merge devel into master (#2402)

amcadmus committed Mar 16, 2023
2 parents 6cdc5bf + afa27d7

Showing 20 changed files with 351 additions and 33 deletions.
12 changes: 6 additions & 6 deletions .github/workflows/build_wheel.yml
@@ -33,7 +33,7 @@ jobs:
platform_id: manylinux_aarch64
dp_variant: cpu
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
submodules: true
# https://github.com/pypa/setuptools_scm/issues/480
@@ -42,13 +42,13 @@ jobs:
name: Setup QEMU
if: matrix.platform_id == 'manylinux_aarch64'
- name: Build wheels
uses: pypa/cibuildwheel@v2.11.3
uses: pypa/cibuildwheel@v2.12.1
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_ARCHS: all
CIBW_BUILD: cp${{ matrix.python }}-${{ matrix.platform_id }}
DP_VARIANT: ${{ matrix.dp_variant }}
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
path: ./wheelhouse/*.whl
build_sdist:
@@ -79,7 +79,7 @@ jobs:
with:
name: artifact
path: dist
- uses: pypa/gh-action-pypi-publish@v4
- uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.pypi_password }}
@@ -103,12 +103,12 @@ jobs:

- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96
with:
images: ghcr.io/deepmodeling/deepmd-kit

- name: Build and push Docker image
uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: source/install/docker
push: ${{ github.repository_owner == 'deepmodeling' && github.event_name == 'push' }}
4 changes: 2 additions & 2 deletions .github/workflows/package_c.yml
@@ -9,14 +9,14 @@ jobs:
name: Build C library
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Package C library
run: ./source/install/docker_package_c.sh
- name: Test C library
run: ./source/install/docker_test_package_c.sh
# for download and debug
- name: Upload artifact
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
path: ./libdeepmd_c.tar.gz
- name: Release
4 changes: 2 additions & 2 deletions deepmd/descriptor/se_a.py
@@ -96,8 +96,8 @@ class DescrptSeA(DescrptSe):
.. math::
(\mathcal{G}^i)_j = \mathcal{N}(s(r_{ji}))
:math:`\mathcal{G}^i_< \in \mathbb{R}^{N \times M_2}` takes first :math:`M_2`$` columns of
:math:`\mathcal{G}^i`$`. The equation of embedding network :math:`\mathcal{N}` can be found at
:math:`\mathcal{G}^i_< \in \mathbb{R}^{N \times M_2}` takes first :math:`M_2` columns of
:math:`\mathcal{G}^i`. The equation of embedding network :math:`\mathcal{N}` can be found at
:meth:`deepmd.utils.network.embedding_net`.
Parameters
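The corrected docstring describes :math:`\mathcal{G}^i_<` as the matrix formed by the first :math:`M_2` columns of the embedding matrix :math:`\mathcal{G}^i`. A minimal numpy sketch of that slice (the sizes N, M1 and M2 below are illustrative, not values used by the library):

import numpy as np

N, M1, M2 = 8, 16, 4            # illustrative sizes: N neighbors, full width M1, axis width M2
G_i = np.random.rand(N, M1)     # embedding matrix G^i, one row per neighbor
G_i_lt = G_i[:, :M2]            # G^i_< : the first M_2 columns, shape (N, M2)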
4 changes: 2 additions & 2 deletions deepmd/descriptor/se_a_mask.py
@@ -70,8 +70,8 @@ class DescrptSeAMask(DescrptSeA):
.. math::
(\mathcal{G}^i)_j = \mathcal{N}(s(r_{ji}))
:math:`\mathcal{G}^i_< \in \mathbb{R}^{N \times M_2}` takes first :math:`M_2`$` columns of
:math:`\mathcal{G}^i`$`. The equation of embedding network :math:`\mathcal{N}` can be found at
:math:`\mathcal{G}^i_< \in \mathbb{R}^{N \times M_2}` takes first :math:`M_2` columns of
:math:`\mathcal{G}^i`. The equation of embedding network :math:`\mathcal{N}` can be found at
:meth:`deepmd.utils.network.embedding_net`.
Specially for descriptor se_a_mask is a concise implementation of se_a.
The difference is that se_a_mask only considered a non-pbc system.
11 changes: 10 additions & 1 deletion deepmd/descriptor/se_atten.py
@@ -1,3 +1,4 @@
import warnings
from typing import (
List,
Optional,
@@ -67,6 +68,8 @@ class DescrptSeAtten(DescrptSeA):
exclude_types : List[List[int]]
The excluded pairs of types which have no interaction with each other.
For example, `[[0, 1]]` means no interaction between type 0 and type 1.
set_davg_zero
Set the shift of embedding net input to zero.
activation_function
The activation function in the embedding net. Supported options are |ACTIVATION_FN|
precision
@@ -97,6 +100,7 @@ def __init__(
trainable: bool = True,
seed: Optional[int] = None,
type_one_side: bool = True,
set_davg_zero: bool = True,
exclude_types: List[List[int]] = [],
activation_function: str = "tanh",
precision: str = "default",
@@ -107,6 +111,11 @@ def __init__(
attn_mask: bool = False,
multi_task: bool = False,
) -> None:
if not set_davg_zero:
warnings.warn(
"Set 'set_davg_zero' False in descriptor 'se_atten' "
"may cause unexpected incontinuity during model inference!"
)
DescrptSeA.__init__(
self,
rcut,
@@ -119,7 +128,7 @@ def __init__(
seed=seed,
type_one_side=type_one_side,
exclude_types=exclude_types,
set_davg_zero=True,
set_davg_zero=set_davg_zero,
activation_function=activation_function,
precision=precision,
uniform_seed=uniform_seed,
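The new `set_davg_zero` argument is now forwarded to the base class instead of being hard-coded to True, and a warning is raised when it is disabled. A hedged sketch of how the flag might appear in the descriptor section of a training script (every other key and value below is illustrative and follows the usual se_atten layout, not a verified configuration):

descriptor = {
    "type": "se_atten",
    "rcut": 6.0,
    "rcut_smth": 0.5,
    "sel": 120,
    "neuron": [25, 50, 100],
    "attn": 128,
    "attn_layer": 2,
    "set_davg_zero": True,   # new optional flag; setting it to False triggers the warning added above
}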
19 changes: 10 additions & 9 deletions deepmd/fit/ener.py
@@ -59,9 +59,9 @@ class EnerFitting(Fitting):
\mathbf{y}=\mathcal{L}(\mathbf{x};\mathbf{w},\mathbf{b})=
\boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b})
where :math:`\mathbf{x} \in \mathbb{R}^{N_1}`$` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
where :math:`\mathbf{x} \in \mathbb{R}^{N_1}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}^{N_2}`
is the output vector. :math:`\mathbf{w} \in \mathbb{R}^{N_1 \times N_2}` and
:math:`\mathbf{b} \in \mathbb{R}^{N_2}`$` are weights and biases, respectively,
:math:`\mathbf{b} \in \mathbb{R}^{N_2}` are weights and biases, respectively,
both of which are trainable if `trainable[i]` is `True`. :math:`\boldsymbol{\phi}`
is the activation function.
@@ -71,9 +71,9 @@ class EnerFitting(Fitting):
\mathbf{y}=\mathcal{L}^{(n)}(\mathbf{x};\mathbf{w},\mathbf{b})=
\mathbf{x}^T\mathbf{w}+\mathbf{b}
where :math:`\mathbf{x} \in \mathbb{R}^{N_{n-1}}`$` is the input vector and :math:`\mathbf{y} \in \mathbb{R}`
where :math:`\mathbf{x} \in \mathbb{R}^{N_{n-1}}` is the input vector and :math:`\mathbf{y} \in \mathbb{R}`
is the output scalar. :math:`\mathbf{w} \in \mathbb{R}^{N_{n-1}}` and
:math:`\mathbf{b} \in \mathbb{R}`$` are weights and bias, respectively,
:math:`\mathbf{b} \in \mathbb{R}` are weights and bias, respectively,
both of which are trainable if `trainable[n]` is `True`.
Parameters
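A worked numpy rendering of the hidden-layer equation above, :math:`\mathbf{y}=\boldsymbol{\phi}(\mathbf{x}^T\mathbf{w}+\mathbf{b})`, using tanh as the activation (the layer widths are illustrative):

import numpy as np

N1, N2 = 4, 3                 # illustrative layer widths
x = np.random.rand(N1)        # input vector, R^{N1}
w = np.random.rand(N1, N2)    # weight matrix, R^{N1 x N2}
b = np.random.rand(N2)        # bias vector, R^{N2}
y = np.tanh(x @ w + b)        # output vector, R^{N2}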
@@ -549,13 +549,14 @@ def build(
aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])

atype_nall = tf.reshape(atype, [-1, natoms[1]])
atype_filter = tf.cast(atype_nall >= 0, GLOBAL_TF_FLOAT_PRECISION)
self.atype_nloc = tf.slice(
atype_nall, [0, 0], [-1, natoms[0]]
) ## lammps will make error
atype_filter = tf.cast(self.atype_nloc >= 0, GLOBAL_TF_FLOAT_PRECISION)
self.atype_nloc = tf.reshape(self.atype_nloc, [-1])
# prevent embedding_lookup error,
# but the filter will be applied anyway
atype_nall = tf.clip_by_value(atype_nall, 0, self.ntypes - 1)
self.atype_nloc = tf.reshape(
tf.slice(atype_nall, [0, 0], [-1, natoms[0]]), [-1]
) ## lammps will make error
self.atype_nloc = tf.clip_by_value(self.atype_nloc, 0, self.ntypes - 1)
if type_embedding is not None:
atype_embed = tf.nn.embedding_lookup(type_embedding, self.atype_nloc)
else:
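The reworked build() logic slices out the local atom types first, computes the filter from the unclipped values, and only then clips the types for the embedding lookup, so virtual atoms (type -1) stay masked. A self-contained numpy sketch of that ordering (shapes and values are illustrative, not the TensorFlow code itself):

import numpy as np

ntypes = 2
atype_nall = np.array([[0, 1, -1, 1, 0]])        # assumed frame: 3 local atoms (one virtual) plus 2 ghosts
nloc = 3
atype_nloc = atype_nall[:, :nloc]                # slice local atoms before anything else
atype_filter = (atype_nloc >= 0).astype(float)   # virtual atoms are filtered out of the energy sum
atype_lookup = np.clip(atype_nloc.reshape(-1), 0, ntypes - 1)  # valid indices for embedding_lookup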
2 changes: 2 additions & 0 deletions deepmd/loggers/loggers.py
@@ -229,6 +229,8 @@ def set_log_handles(

ch.setLevel(level)
ch.addFilter(_AppFilter())
# clean old handlers before adding new one
root_log.handlers.clear()
root_log.addHandler(ch)

# * add file handler ***************************************************************
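The added line clears handlers left over from an earlier set_log_handles() call, so reconfiguring logging does not duplicate every record. A minimal standalone illustration of the failure mode being avoided (this is not the library code itself):

import logging

root = logging.getLogger()
for _ in range(2):                            # configure logging twice, as a user might
    root.handlers.clear()                     # mirrors the added line
    root.addHandler(logging.StreamHandler())
assert len(root.handlers) == 1                # without clear(), two handlers would log each message twice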
7 changes: 6 additions & 1 deletion deepmd/utils/argcheck.py
@@ -325,6 +325,7 @@ def descrpt_se_atten_args():
doc_precision = f"The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
doc_trainable = "If the parameters in the embedding net is trainable"
doc_seed = "Random seed for parameter initialization"
doc_set_davg_zero = "Set the normalization average to zero. This option should be set when `se_atten` descriptor or `atom_ener` in the energy fitting is used"
doc_exclude_types = "The excluded pairs of types which have no interaction with each other. For example, `[[0, 1]]` means no interaction between type 0 and type 1."
doc_attn = "The length of hidden vectors in attention layers"
doc_attn_layer = "The number of attention layers"
@@ -361,6 +362,9 @@ def descrpt_se_atten_args():
Argument(
"exclude_types", list, optional=True, default=[], doc=doc_exclude_types
),
Argument(
"set_davg_zero", bool, optional=True, default=True, doc=doc_set_davg_zero
),
Argument("attn", int, optional=True, default=128, doc=doc_attn),
Argument("attn_layer", int, optional=True, default=2, doc=doc_attn_layer),
Argument("attn_dotr", bool, optional=True, default=True, doc=doc_attn_dotr),
@@ -972,7 +976,8 @@ def training_data_args(): # ! added by Ziyao: new specification style for data
- list: the length of which is the same as the {link_sys}. The batch size of each system is given by the elements of the list.\n\n\
- int: all {link_sys} use the same batch size.\n\n\
- string "auto": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than 32.\n\n\
- string "auto:N": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.'
- string "auto:N": automatically determines the batch size so that the batch_size times the number of atoms in the system is no less than N.\n\n\
- string "mixed:N": the batch data will be sampled from all systems and merged into a mixed system with the batch size N. Only support the se_atten descriptor.'
doc_auto_prob_style = 'Determine the probability of systems automatically. The method is assigned by this key and can be\n\n\
- "prob_uniform" : the probability all the systems are equal, namely 1.0/self.get_nsystems()\n\n\
- "prob_sys_size" : the probability of a system is proportional to the number of batches in the system\n\n\
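A hedged sketch of the batch-size rules documented above: "auto:N" picks the smallest batch size whose product with the number of atoms is no less than N, while the new "mixed:N" string requests one merged batch of N frames drawn from all systems. The helper and the paths below are illustrative, not the library's internal implementation:

import math

def auto_batch_size(natoms: int, rule: int = 32) -> int:
    # smallest batch size with batch_size * natoms >= rule
    return max(1, math.ceil(rule / natoms))

training_data = {
    "systems": ["../data/system_a", "../data/system_b"],  # illustrative paths
    "batch_size": "mixed:64",                             # only supported with the se_atten descriptor
}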
10 changes: 9 additions & 1 deletion deepmd/utils/data.py
@@ -91,6 +91,10 @@ def __init__(
self.type_idx_map = np.array(
sorter[np.searchsorted(type_map, self.type_map, sorter=sorter)]
)
# padding for virtual atom
self.type_idx_map = np.append(
self.type_idx_map, np.array([-1], dtype=np.int32)
)
self.type_map = type_map
if type_map is None and self.type_map is None and self.mixed_type:
raise RuntimeError("mixed_type format must have type_map!")
@@ -489,8 +493,12 @@ def _load_set(self, set_name: DPPath):
[(real_type == i).sum(axis=-1) for i in range(self.get_ntypes())],
dtype=np.int32,
).T
ghost_nums = np.array(
[(real_type == -1).sum(axis=-1)],
dtype=np.int32,
).T
assert (
atom_type_nums.sum(axis=-1) == natoms
atom_type_nums.sum(axis=-1) + ghost_nums.sum(axis=-1) == natoms
).all(), "some types in 'real_atom_types.npy' of set {} are not contained in {} types!".format(
set_name, self.get_ntypes()
)
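With the new padding, type index -1 marks virtual (ghost) atoms, and the consistency check now requires real plus ghost counts to sum to the padded natoms. A small numpy sketch of that accounting for a single frame (values are illustrative):

import numpy as np

real_type = np.array([[0, 1, 1, -1, -1]])      # one frame padded to natoms = 5
ntypes = 2
atom_type_nums = np.array(
    [(real_type == i).sum(axis=-1) for i in range(ntypes)], dtype=np.int32
).T                                            # per-frame count of each real type
ghost_nums = np.array([(real_type == -1).sum(axis=-1)], dtype=np.int32).T
natoms = real_type.shape[1]
assert (atom_type_nums.sum(axis=-1) + ghost_nums.sum(axis=-1) == natoms).all()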
106 changes: 104 additions & 2 deletions deepmd/utils/data_system.py
@@ -112,6 +112,7 @@ def __init__(
# batch size
self.batch_size = batch_size
is_auto_bs = False
self.mixed_systems = False
if isinstance(self.batch_size, int):
self.batch_size = self.batch_size * np.ones(self.nsystems, dtype=int)
elif isinstance(self.batch_size, str):
@@ -121,9 +122,16 @@
rule = 32
if len(words) == 2:
rule = int(words[1])
self.batch_size = self._make_auto_bs(rule)
elif "mixed" == words[0]:
self.mixed_systems = True
if len(words) == 2:
rule = int(words[1])
else:
raise RuntimeError("batch size must be specified for mixed systems")
self.batch_size = rule * np.ones(self.nsystems, dtype=int)
else:
raise RuntimeError("unknown batch_size rule " + words[0])
self.batch_size = self._make_auto_bs(rule)
elif isinstance(self.batch_size, list):
pass
else:
@@ -361,7 +369,7 @@ def _get_sys_probs(self, sys_probs, auto_prob_style): # depreciated
prob = self._process_sys_probs(sys_probs)
return prob

def get_batch(self, sys_idx: Optional[int] = None):
def get_batch(self, sys_idx: Optional[int] = None) -> dict:
# batch generation style altered by Ziyao Li:
# one should specify the "sys_prob" and "auto_prob_style" params
# via set_sys_prob() function. The sys_probs this function uses is
@@ -375,9 +383,36 @@ def get_batch(self, sys_idx: Optional[int] = None):
The index of system from which the batch is get.
If sys_idx is not None, `sys_probs` and `auto_prob_style` are ignored
If sys_idx is None, automatically determine the system according to `sys_probs` or `auto_prob_style`, see the following.
This option does not work for mixed systems.
Returns
-------
dict
The batch data
"""
if not hasattr(self, "default_mesh"):
self._make_default_mesh()
if not self.mixed_systems:
b_data = self.get_batch_standard(sys_idx)
else:
b_data = self.get_batch_mixed()
return b_data

def get_batch_standard(self, sys_idx: Optional[int] = None) -> dict:
"""Get a batch of data from the data systems in the standard way.
Parameters
----------
sys_idx : int
The index of system from which the batch is get.
If sys_idx is not None, `sys_probs` and `auto_prob_style` are ignored
If sys_idx is None, automatically determine the system according to `sys_probs` or `auto_prob_style`, see the following.
Returns
-------
dict
The batch data
"""
if sys_idx is not None:
self.pick_idx = sys_idx
else:
@@ -390,6 +425,73 @@ def get_batch(self, sys_idx: Optional[int] = None):
b_data["default_mesh"] = self.default_mesh[self.pick_idx]
return b_data

def get_batch_mixed(self) -> dict:
"""Get a batch of data from the data systems in the mixed way.
Returns
-------
dict
The batch data
"""
# mixed systems have a global batch size
batch_size = self.batch_size[0]
batch_data = []
for _ in range(batch_size):
self.pick_idx = dp_random.choice(np.arange(self.nsystems), p=self.sys_probs)
bb_data = self.data_systems[self.pick_idx].get_batch(1)
bb_data["natoms_vec"] = self.natoms_vec[self.pick_idx]
bb_data["default_mesh"] = self.default_mesh[self.pick_idx]
batch_data.append(bb_data)
b_data = self._merge_batch_data(batch_data)
return b_data

def _merge_batch_data(self, batch_data: List[dict]) -> dict:
"""Merge batch data from different systems.
Parameters
----------
batch_data : list of dict
A list of batch data from different systems.
Returns
-------
dict
The merged batch data.
"""
b_data = {}
max_natoms = max(bb["natoms_vec"][0] for bb in batch_data)
# natoms_vec
natoms_vec = np.zeros(2 + self.get_ntypes(), dtype=int)
natoms_vec[0:3] = max_natoms
b_data["natoms_vec"] = natoms_vec
# real_natoms_vec
real_natoms_vec = np.vstack([bb["natoms_vec"] for bb in batch_data])
b_data["real_natoms_vec"] = real_natoms_vec
# type
type_vec = np.full((len(batch_data), max_natoms), -1, dtype=int)
for ii, bb in enumerate(batch_data):
type_vec[ii, : bb["type"].shape[1]] = bb["type"][0]
b_data["type"] = type_vec
# default_mesh
default_mesh = np.mean([bb["default_mesh"] for bb in batch_data], axis=0)
b_data["default_mesh"] = default_mesh
# other data
data_dict = self.get_data_dict(0)
for kk, vv in data_dict.items():
if kk not in batch_data[0]:
continue
b_data["find_" + kk] = batch_data[0]["find_" + kk]
if not vv["atomic"]:
b_data[kk] = np.concatenate([bb[kk] for bb in batch_data], axis=0)
else:
b_data[kk] = np.zeros(
(len(batch_data), max_natoms * vv["ndof"] * vv["repeat"]),
dtype=batch_data[0][kk].dtype,
)
for ii, bb in enumerate(batch_data):
b_data[kk][ii, : bb[kk].shape[1]] = bb[kk][0]
return b_data

# ! altered by Marián Rynik
def get_test(self, sys_idx: Optional[int] = None, n_test: int = -1): # depreciated
"""Get test data from the the data systems.
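The merge step documented in _merge_batch_data pads every frame up to the largest natoms in the batch and fills the missing positions with type -1, which downstream code treats as virtual atoms. A self-contained numpy sketch of the type padding (the frames and sizes are illustrative):

import numpy as np

frame_types = [np.array([[0, 1, 1]]), np.array([[1, 0]])]        # frames drawn from two systems
max_natoms = max(t.shape[1] for t in frame_types)
type_vec = np.full((len(frame_types), max_natoms), -1, dtype=int)
for i, t in enumerate(frame_types):
    type_vec[i, : t.shape[1]] = t[0]
# type_vec -> [[0, 1, 1], [1, 0, -1]]: the shorter frame is padded with virtual type -1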
4 changes: 2 additions & 2 deletions deepmd/utils/neighbor_stat.py
@@ -69,8 +69,8 @@ def builder():
t_natoms = place_holders["natoms_vec"]
if self.one_type:
# all types = 0, natoms_vec = [natoms, natoms, natoms]
t_type = tf.zeros_like(t_type, dtype=tf.int32)
t_natoms = tf.repeat(t_natoms[0], 3)
t_type = tf.clip_by_value(t_type, -1, 0)
t_natoms = tf.tile(t_natoms[0:1], [3])

_max_nbor_size, _min_nbor_dist = op_module.neighbor_stat(
place_holders["coord"],
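The replacement uses tf.clip_by_value instead of tf.zeros_like so that, in one-type mode, every real type collapses to 0 while the -1 entries marking virtual atoms are preserved; t_natoms is likewise rebuilt with tf.tile on a length-1 slice. A numpy analogue of the type clipping (values are illustrative):

import numpy as np

t_type = np.array([0, 2, 1, -1, -1])
one_type = np.clip(t_type, -1, 0)     # -> [0, 0, 0, -1, -1]; zeros_like would have erased the -1 markers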
