Merge devel into master (#2955)

wanghan-iapcm committed Oct 27, 2023
2 parents 2fe6927 + ce75fcb commit 839f4fe

Showing 66 changed files with 564 additions and 301 deletions.
35 changes: 35 additions & 0 deletions .github/workflows/build_wheel.yml
@@ -136,6 +136,41 @@ jobs:
      tags: ${{ steps.meta.outputs.tags }}
      labels: ${{ steps.meta.outputs.labels }}

+  build_pypi_index:
+    needs: [build_wheels, build_sdist]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/download-artifact@v3
+        with:
+          name: artifact
+          path: dist/packages
+      - uses: actions/setup-python@v4
+        name: Install Python
+        with:
+          python-version: '3.11'
+      - run: pip install dumb-pypi
+      - run: |
+          ls dist/packages > package_list.txt
+          dumb-pypi --output-dir dist --packages-url ../../packages --package-list package_list.txt --title "DeePMD-kit Developed Packages"
+      - name: Upload Pages artifact
+        uses: actions/upload-pages-artifact@v2
+        with:
+          path: dist
+  deploy_pypi_index:
+    needs: build_pypi_index
+    permissions:
+      pages: write
+      id-token: write
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    runs-on: ubuntu-latest
+    if: github.event_name == 'push' && github.ref == 'refs/heads/devel' && github.repository_owner == 'deepmodeling'
+    steps:
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v2
+
  pass:
    name: Pass testing build wheels
    needs: [build_wheels, build_sdist]
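The two new jobs publish every devel build as a static PyPI-style index on GitHub Pages. A minimal sketch of reproducing the index-build step locally in Python (it assumes dumb-pypi is pip-installed and the built distributions already sit in dist/packages; the CLI flags mirror the workflow above):

    import subprocess
    from pathlib import Path

    # One distribution filename per line, as dumb-pypi's --package-list expects.
    names = sorted(p.name for p in Path("dist/packages").iterdir())
    Path("package_list.txt").write_text("\n".join(names) + "\n")

    # Build the static index under dist/, pointing package links at
    # ../../packages exactly as the workflow does.
    subprocess.run(
        [
            "dumb-pypi",
            "--output-dir", "dist",
            "--packages-url", "../../packages",
            "--package-list", "package_list.txt",
            "--title", "DeePMD-kit Developed Packages",
        ],
        check=True,
    )

The deploy job only runs for pushes to devel in the deepmodeling repository, so forks do not attempt to publish to Pages.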
2 changes: 1 addition & 1 deletion .github/workflows/test_cc.yml
@@ -30,7 +30,7 @@ jobs:
# TODO: remove ase version when ase has new release
- run: |
python -m pip install -U pip
-          python -m pip install -e .[cpu,test,lmp] "ase @ https://github.com/rosswhitfield/ase/archive/edd03571aff6944b77b4a4b055239f3c3e4eeb66.zip"
+          python -m pip install -e .[cpu,test,lmp] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
env:
DP_BUILD_TESTING: 1
- run: pytest --cov=deepmd source/lmp/tests
2 changes: 1 addition & 1 deletion .github/workflows/test_cuda.yml
@@ -36,7 +36,7 @@ jobs:
- name: Set PyPI mirror for Aliyun cloud machine
run: python -m pip config --user set global.index-url https://mirrors.aliyun.com/pypi/simple/
- run: python -m pip install -U "pip>=21.3.1,!=23.0.0"
-      - run: python -m pip install -v -e .[gpu,test,lmp,cu11] "ase @ https://github.com/rosswhitfield/ase/archive/edd03571aff6944b77b4a4b055239f3c3e4eeb66.zip"
+      - run: python -m pip install -v -e .[gpu,test,lmp,cu11] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
env:
DP_BUILD_TESTING: 1
DP_VARIANT: cuda
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -30,13 +30,13 @@ repos:
exclude: ^source/3rdparty
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
-    rev: v0.0.292
+    rev: v0.1.1
hooks:
- id: ruff
args: ["--fix"]
exclude: ^source/3rdparty
- repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 23.9.1
+    rev: 23.10.0
hooks:
- id: black-jupyter
exclude: ^source/3rdparty
@@ -54,7 +54,7 @@ repos:
- id: blacken-docs
# C++
- repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v16.0.6
+    rev: v17.0.3
hooks:
- id: clang-format
exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc
7 changes: 0 additions & 7 deletions deepmd/descriptor/se_a.py
@@ -469,13 +469,6 @@ def enable_compression(
"empty embedding-net are not supported in model compression!"
)

-        for ii in range(len(self.filter_neuron) - 1):
-            if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
-                raise NotImplementedError(
-                    "Model Compression error: descriptor neuron [%s] is not supported by model compression! "
-                    "The size of the next layer of the neural network must be twice the size of the previous layer."
-                    % ",".join([str(item) for item in self.filter_neuron])
-                )
if self.stripped_type_embedding:
ret_two_side = get_pattern_nodes_from_graph_def(
graph_def, f"filter_type_all{suffix}/.+_two_side_ebd"
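The same block is deleted from se_atten.py, se_r.py, and se_t.py below: model compression previously rejected any embedding net whose layer widths did not exactly double from one layer to the next. A sketch of the now-removed constraint (the helper name is hypothetical):

    def is_strictly_doubling(filter_neuron):
        # The deleted guard required filter_neuron[i + 1] == 2 * filter_neuron[i]
        # for every consecutive pair of embedding-net widths.
        return all(2 * a == b for a, b in zip(filter_neuron, filter_neuron[1:]))

    assert is_strictly_doubling([25, 50, 100])     # compressible before and after
    assert not is_strictly_doubling([20, 40, 60])  # rejected before, accepted now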
8 changes: 0 additions & 8 deletions deepmd/descriptor/se_atten.py
@@ -387,14 +387,6 @@ def enable_compression(
"empty embedding-net are not supported in model compression!"
)

-        for ii in range(len(self.filter_neuron) - 1):
-            if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
-                raise NotImplementedError(
-                    "Model Compression error: descriptor neuron [%s] is not supported by model compression! "
-                    "The size of the next layer of the neural network must be twice the size of the previous layer."
-                    % ",".join([str(item) for item in self.filter_neuron])
-                )

if self.attn_layer != 0:
raise RuntimeError("can not compress model when attention layer is not 0.")

8 changes: 0 additions & 8 deletions deepmd/descriptor/se_r.py
@@ -334,14 +334,6 @@ def enable_compression(
not self.filter_resnet_dt
), "Model compression error: descriptor resnet_dt must be false!"

-        for ii in range(len(self.filter_neuron) - 1):
-            if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
-                raise NotImplementedError(
-                    "Model Compression error: descriptor neuron [%s] is not supported by model compression! "
-                    "The size of the next layer of the neural network must be twice the size of the previous layer."
-                    % ",".join([str(item) for item in self.filter_neuron])
-                )

self.compress = True
self.table = DPTabulate(
self,
8 changes: 0 additions & 8 deletions deepmd/descriptor/se_t.py
@@ -349,14 +349,6 @@ def enable_compression(
not self.filter_resnet_dt
), "Model compression error: descriptor resnet_dt must be false!"

-        for ii in range(len(self.filter_neuron) - 1):
-            if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
-                raise NotImplementedError(
-                    "Model Compression error: descriptor neuron [%s] is not supported by model compression! "
-                    "The size of the next layer of the neural network must be twice the size of the previous layer."
-                    % ",".join([str(item) for item in self.filter_neuron])
-                )

self.compress = True
self.table = DPTabulate(
self,
39 changes: 23 additions & 16 deletions deepmd/entrypoints/convert.py
@@ -5,6 +5,7 @@
convert_12_to_21,
convert_13_to_21,
convert_20_to_21,
+    convert_pb_to_pbtxt,
convert_pbtxt_to_pb,
convert_to_21,
)
@@ -17,20 +18,26 @@ def convert(
output_model: str,
**kwargs,
):
if FROM == "auto":
convert_to_21(input_model, output_model)
elif FROM == "0.12":
convert_012_to_21(input_model, output_model)
elif FROM == "1.0":
convert_10_to_21(input_model, output_model)
elif FROM in ["1.1", "1.2"]:
# no difference between 1.1 and 1.2
convert_12_to_21(input_model, output_model)
elif FROM == "1.3":
convert_13_to_21(input_model, output_model)
elif FROM == "2.0":
convert_20_to_21(input_model, output_model)
elif FROM == "pbtxt":
convert_pbtxt_to_pb(input_model, output_model)
if output_model[-6:] == ".pbtxt":
if input_model[-6:] != ".pbtxt":
convert_pb_to_pbtxt(input_model, output_model)
else:
raise RuntimeError("input model is already pbtxt")
else:
raise RuntimeError("unsupported model version " + FROM)
if FROM == "auto":
convert_to_21(input_model, output_model)
elif FROM == "0.12":
convert_012_to_21(input_model, output_model)
elif FROM == "1.0":
convert_10_to_21(input_model, output_model)
elif FROM in ["1.1", "1.2"]:
# no difference between 1.1 and 1.2
convert_12_to_21(input_model, output_model)
elif FROM == "1.3":
convert_13_to_21(input_model, output_model)
elif FROM == "2.0":
convert_20_to_21(input_model, output_model)
elif FROM == "pbtxt":
convert_pbtxt_to_pb(input_model, output_model)
else:
raise RuntimeError("unsupported model version " + FROM)
6 changes: 5 additions & 1 deletion deepmd/entrypoints/freeze.py
@@ -511,9 +511,13 @@ def freeze(
# We import the meta graph and retrieve a Saver
try:
        # In case of parallel training
-        import horovod.tensorflow as _  # noqa: F401
+        import horovod.tensorflow as HVD
    except ImportError:
        pass
+    else:
+        HVD.init()
+        if HVD.rank() > 0:
+            return
saver = tf.train.import_meta_graph(
f"{input_checkpoint}.meta", clear_devices=clear_devices
)
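Horovod used to be imported only to probe availability; freeze() now initializes it and returns early on non-root ranks, so a parallel run writes the frozen graph exactly once. The guard in isolation, as a sketch (the wrapper is hypothetical):

    def run_on_rank0_only(fn, *args, **kwargs):
        try:
            import horovod.tensorflow as HVD
        except ImportError:
            pass  # single-process run: fall through and just call fn
        else:
            HVD.init()
            if HVD.rank() > 0:
                return None  # non-root workers skip the work entirely
        return fn(*args, **kwargs)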
4 changes: 1 addition & 3 deletions deepmd/entrypoints/train.py
@@ -406,9 +406,7 @@ def get_nbor_stat(jdata, rcut, one_type: bool = False):
tmp_data.get_batch()
assert (
tmp_data.get_type_map()
), "In multi-task mode, 'type_map.raw' must be defined in data systems {}! ".format(
systems
)
), f"In multi-task mode, 'type_map.raw' must be defined in data systems {systems}! "
if train_data is None:
train_data = tmp_data
else:
1 change: 1 addition & 0 deletions deepmd/env.py
@@ -89,6 +89,7 @@ def dlopen_library(module: str, filename: str):
"global_cvt_2_tf_float",
"global_cvt_2_ener_float",
"MODEL_VERSION",
"SHARED_LIB_DIR",
"SHARED_LIB_MODULE",
"default_tf_session_config",
"reset_default_tf_session_config",
2 changes: 1 addition & 1 deletion deepmd/fit/dos.py
@@ -168,7 +168,7 @@ def get_numb_fparam(self) -> int:

def get_numb_aparam(self) -> int:
"""Get the number of atomic parameters."""
-        return self.numb_fparam
+        return self.numb_aparam

def get_numb_dos(self) -> int:
"""Get the number of gridpoints in energy space."""
2 changes: 1 addition & 1 deletion deepmd/fit/ener.py
@@ -228,7 +228,7 @@ def get_numb_fparam(self) -> int:

def get_numb_aparam(self) -> int:
"""Get the number of atomic parameters."""
-        return self.numb_fparam
+        return self.numb_aparam

def compute_output_stats(self, all_stat: dict, mixed_type: bool = False) -> None:
"""Compute the ouput statistics.
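This one-line copy-paste fix lands in four places: deepmd/fit/dos.py above, deepmd/fit/ener.py here, and deepmd/model/dos.py and deepmd/model/ener.py below; get_numb_aparam() was returning the frame-parameter count instead of the atomic-parameter count. A regression-style sketch (the constructor arguments are hypothetical; the two getters are the point):

    fit = EnerFitting(descrpt, neuron=[240, 240, 240], numb_fparam=2, numb_aparam=3)
    assert fit.get_numb_fparam() == 2
    assert fit.get_numb_aparam() == 3  # returned 2 (numb_fparam) before this fix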
7 changes: 5 additions & 2 deletions deepmd/infer/deep_pot.py
@@ -307,7 +307,10 @@ def _get_natoms_and_nframes(
natoms = len(atom_types[0])
else:
natoms = len(atom_types)
-        coords = np.reshape(np.array(coords), [-1, natoms * 3])
+        if natoms == 0:
+            assert coords.size == 0
+        else:
+            coords = np.reshape(np.array(coords), [-1, natoms * 3])
nframes = coords.shape[0]
return natoms, nframes

@@ -415,7 +418,7 @@ def _prepare_feed_dict(
atom_types = np.array(atom_types, dtype=int).reshape([-1, natoms])
else:
atom_types = np.array(atom_types, dtype=int).reshape([-1])
-        coords = np.reshape(np.array(coords), [-1, natoms * 3])
+        coords = np.reshape(np.array(coords), [nframes, natoms * 3])
if cells is None:
pbc = False
# make cells to work around the requirement of pbc
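The edge case behind these two hunks: with natoms == 0, reshaping to [-1, natoms * 3] asks NumPy to infer the -1 axis against a zero-sized axis, which raises, so the first hunk skips the reshape and the second uses the already-known nframes. A minimal illustration (the values are hypothetical):

    import numpy as np

    coords = np.zeros((1, 0))  # one frame, zero atoms
    natoms, nframes = 0, 1

    # np.reshape(coords, [-1, natoms * 3]) would raise ValueError: the -1 axis
    # is ambiguous when the other axis is 0. An explicit frame count is fine:
    reshaped = np.reshape(coords, [nframes, natoms * 3])
    assert reshaped.shape == (1, 0)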
3 changes: 2 additions & 1 deletion deepmd/lmp.py
@@ -18,6 +18,7 @@
)

from deepmd.env import (
+    SHARED_LIB_DIR,
TF_VERSION,
tf,
)
@@ -74,7 +75,7 @@ def get_library_path(module: str, filename: str) -> List[str]:
raise RuntimeError("Unsupported platform")

tf_dir = tf.sysconfig.get_lib()
-op_dir = str((Path(__file__).parent / "lib").absolute())
+op_dir = str(SHARED_LIB_DIR)


cuda_library_paths = []
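deepmd/env.py now exports SHARED_LIB_DIR (added to its __all__ above), and lmp.py consumes the shared constant instead of re-deriving the path by hand. A sketch of the equivalence (assumes an installed deepmd package whose op library lives in deepmd/lib):

    from pathlib import Path

    import deepmd
    from deepmd.env import SHARED_LIB_DIR

    # What lmp.py used to compute locally, and what the shared constant replaces:
    legacy_op_dir = str((Path(deepmd.__file__).parent / "lib").absolute())
    assert str(SHARED_LIB_DIR) == legacy_op_dir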
2 changes: 1 addition & 1 deletion deepmd/model/dos.py
@@ -88,7 +88,7 @@ def get_numb_fparam(self) -> int:

def get_numb_aparam(self) -> int:
"""Get the number of atomic parameters."""
-        return self.numb_fparam
+        return self.numb_aparam

def data_stat(self, data):
all_stat = make_stat_input(data, self.data_stat_nbatch, merge_sys=False)
2 changes: 1 addition & 1 deletion deepmd/model/ener.py
@@ -120,7 +120,7 @@ def get_numb_fparam(self) -> int:

def get_numb_aparam(self) -> int:
"""Get the number of atomic parameters."""
-        return self.numb_fparam
+        return self.numb_aparam

def data_stat(self, data):
all_stat = make_stat_input(data, self.data_stat_nbatch, merge_sys=False)
8 changes: 4 additions & 4 deletions deepmd/train/trainer.py
@@ -368,12 +368,12 @@ def _build_network(self, data, suffix=""):
self.place_holders[kk] = tf.placeholder(
GLOBAL_TF_FLOAT_PRECISION, [None], "t_" + kk
)
-            self._get_place_horders(data_requirement)
+            self._get_place_holders(data_requirement)
else:
if not self.multi_task_mode:
-                self._get_place_horders(data.get_data_dict())
+                self._get_place_holders(data.get_data_dict())
else:
-                self._get_place_horders(data[next(iter(data.keys()))].get_data_dict())
+                self._get_place_holders(data[next(iter(data.keys()))].get_data_dict())

self.place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type")
self.place_holders["natoms_vec"] = tf.placeholder(
@@ -1035,7 +1035,7 @@ def save_compressed(self):
if self.is_compress:
self.saver.save(self.sess, os.path.join(os.getcwd(), self.save_ckpt))

-    def _get_place_horders(self, data_dict):
+    def _get_place_holders(self, data_dict):
for kk in data_dict.keys():
if kk == "type":
continue