Commit 6e3d4a6

merge devel into master (#1942)

amcadmus committed Sep 23, 2022
2 parents c393392 + ffb2fd8
Showing 83 changed files with 5,187 additions and 322 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build_wheel.yml
@@ -28,7 +28,7 @@ jobs:
       - name: Build wheels
         env:
           CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-* cp310-*"
-          CIBW_MANYLINUX_X86_64_IMAGE: ghcr.io/deepmodeling/manylinux_2_24_x86_64_tensorflow
+          CIBW_MANYLINUX_X86_64_IMAGE: ghcr.io/deepmodeling/manylinux_2_28_x86_64_tensorflow
           CIBW_BEFORE_BUILD: pip install tensorflow
           CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux*"
         run: |
1 change: 1 addition & 0 deletions README.md
@@ -94,6 +94,7 @@ A full [document](doc/train/train-input-auto.rst) on options in the training inp
 - [Descriptor `"se_e2_a"`](doc/model/train-se-e2-a.md)
 - [Descriptor `"se_e2_r"`](doc/model/train-se-e2-r.md)
 - [Descriptor `"se_e3"`](doc/model/train-se-e3.md)
+- [Descriptor `"se_atten"`](doc/model/train-se-atten.md)
 - [Descriptor `"hybrid"`](doc/model/train-hybrid.md)
 - [Descriptor `sel`](doc/model/sel.md)
 - [Fit energy](doc/model/train-energy.md)
10 changes: 6 additions & 4 deletions deepmd/common.py
@@ -66,7 +66,7 @@ def gelu(x: tf.Tensor) -> tf.Tensor:
     Original paper
     https://arxiv.org/abs/1606.08415
     """
-    return op_module.gelu(x)
+    return op_module.gelu_custom(x)


 def gelu_tf(x: tf.Tensor) -> tf.Tensor:
@@ -94,7 +94,7 @@ def gelu_wrapper(x):
             return tensorflow.nn.gelu(x, approximate=True)
         except AttributeError:
             warnings.warn("TensorFlow does not provide an implementation of gelu, please upgrade your TensorFlow version. Fallback to the custom gelu operator.")
-            return op_module.gelu(x)
+            return op_module.gelu_custom(x)
     return (lambda x: gelu_wrapper(x))(x)

# TODO this is not a good way to do things. This is some global variable to which
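For reference (not part of the diff): the warning above implies that op_module.gelu_custom and tf.nn.gelu(x, approximate=True) compute the same thing, namely the tanh approximation of GELU from the paper linked in the docstring. A minimal NumPy sketch of that formula, with illustrative inputs:

import numpy as np

def gelu_tanh(x):
    # Tanh approximation of GELU (Hendrycks & Gimpel, arXiv:1606.08415).
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

print(gelu_tanh(np.array([-1.0, 0.0, 1.0])))  # ~[-0.1588, 0.0, 0.8412]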
@@ -405,8 +405,8 @@ def j_loader(filename: Union[str, Path]) -> Dict[str, Any]:


 def get_activation_func(
-    activation_fn: "_ACTIVATION",
-) -> Callable[[tf.Tensor], tf.Tensor]:
+    activation_fn: Union["_ACTIVATION", None],
+) -> Union[Callable[[tf.Tensor], tf.Tensor], None]:
"""Get activation function callable based on string name.
Parameters
@@ -424,6 +424,8 @@ def get_activation_func(
     RuntimeError
         if unknown activation function is specified
     """
+    if activation_fn is None or activation_fn in ['none', 'None']:
+        return None
+
     if activation_fn not in ACTIVATION_FN_DICT:
         raise RuntimeError(f"{activation_fn} is not a valid activation function")
     return ACTIVATION_FN_DICT[activation_fn]
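With this change, "none"/"None" (or a Python None) disables the activation instead of raising RuntimeError. A hypothetical usage sketch (assuming the deepmd.common import path shown in this diff, and that "tanh" is a registered key in ACTIVATION_FN_DICT):

from deepmd.common import get_activation_func

act = get_activation_func("none")   # now returns None instead of raising
assert act is None

act = get_activation_func("tanh")   # unchanged: returns the registered callable
assert callable(act)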
1 change: 1 addition & 0 deletions deepmd/descriptor/__init__.py
@@ -7,3 +7,4 @@
 from .se_a_ef import DescrptSeAEf
 from .se_a_ef import DescrptSeAEfLower
 from .loc_frame import DescrptLocFrame
+from .se_atten import DescrptSeAtten
10 changes: 4 additions & 6 deletions deepmd/descriptor/se_a.py
@@ -164,10 +164,7 @@ def __init__ (self,
         self.embedding_net_variables = None
         self.mixed_prec = None
         self.place_holders = {}
-        nei_type = np.array([])
-        for ii in range(self.ntypes):
-            nei_type = np.append(nei_type, ii * np.ones(self.sel_a[ii])) # like a mask
-        self.nei_type = tf.constant(nei_type, dtype = tf.int32)
+        self.nei_type = np.repeat(np.arange(self.ntypes), self.sel_a) # like a mask

         avg_zero = np.zeros([self.ntypes,self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
         std_ones = np.ones ([self.ntypes,self.ndescrpt]).astype(GLOBAL_NP_FLOAT_PRECISION)
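The vectorized replacement builds the same per-neighbor type mask as the removed loop, while deferring the tf.constant to _concat_type_embedding (second hunk below). A small sketch with made-up sizes (ntypes = 2, sel_a = [3, 2]):

import numpy as np

ntypes, sel_a = 2, [3, 2]            # illustrative values

# Old approach: grow the array one type at a time.
nei_type = np.array([])
for ii in range(ntypes):
    nei_type = np.append(nei_type, ii * np.ones(sel_a[ii]))

# New approach: one call, same contents.
nei_type_new = np.repeat(np.arange(ntypes), sel_a)

print(nei_type_new)                                  # [0 0 0 1 1]
assert (nei_type.astype(int) == nei_type_new).all()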
@@ -673,8 +670,9 @@ def _concat_type_embedding(
         embedding:
             environment of each atom represented by embedding.
         '''
-        te_out_dim = type_embedding.get_shape().as_list()[-1]
-        nei_embed = tf.nn.embedding_lookup(type_embedding,tf.cast(self.nei_type,dtype=tf.int32)) # shape is [self.nnei, 1+te_out_dim]
+        te_out_dim = type_embedding.get_shape().as_list()[-1]
+        self.t_nei_type = tf.constant(self.nei_type, dtype=tf.int32)
+        nei_embed = tf.nn.embedding_lookup(type_embedding,tf.cast(self.t_nei_type,dtype=tf.int32)) # shape is [self.nnei, 1+te_out_dim]
         nei_embed = tf.tile(nei_embed,(nframes*natoms[0],1)) # shape is [nframes*natoms[0]*self.nnei, te_out_dim]
         nei_embed = tf.reshape(nei_embed,[-1,te_out_dim])
         embedding_input = tf.concat([xyz_scatter,nei_embed],1) # shape is [nframes*natoms[0]*self.nnei, 1+te_out_dim]
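What the lookup-and-tile sequence does (not part of the diff): each neighbor's type index selects a row of the type-embedding table, and the per-neighbor rows are then tiled across all frames and atoms. A NumPy sketch with toy shapes (te_out_dim = 2, nnei = 5; values made up):

import numpy as np

type_embedding = np.array([[0.1, 0.2],    # row for type 0
                           [0.9, 1.0]])   # row for type 1
nei_type = np.array([0, 0, 0, 1, 1])      # per-neighbor types, shape [nnei]

nei_embed = type_embedding[nei_type]      # embedding_lookup: shape [nnei, te_out_dim]

nframes, natoms0 = 2, 3                   # toy frame and atom counts
nei_embed = np.tile(nei_embed, (nframes * natoms0, 1))
print(nei_embed.shape)                    # (30, 2) == [nframes*natoms0*nnei, te_out_dim]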
