Skip to content

Commit 839f4fe

Browse files
Merge devel into master (#2955)
2 parents 2fe6927 + ce75fcb commit 839f4fe

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

66 files changed

+564
-301
lines changed

.github/workflows/build_wheel.yml

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,41 @@ jobs:
136136
tags: ${{ steps.meta.outputs.tags }}
137137
labels: ${{ steps.meta.outputs.labels }}
138138

139+
build_pypi_index:
140+
needs: [build_wheels, build_sdist]
141+
runs-on: ubuntu-latest
142+
steps:
143+
- uses: actions/download-artifact@v3
144+
with:
145+
name: artifact
146+
path: dist/packages
147+
- uses: actions/setup-python@v4
148+
name: Install Python
149+
with:
150+
python-version: '3.11'
151+
- run: pip install dumb-pypi
152+
- run: |
153+
ls dist/packages > package_list.txt
154+
dumb-pypi --output-dir dist --packages-url ../../packages --package-list package_list.txt --title "DeePMD-kit Developed Packages"
155+
- name: Upload Pages artifact
156+
uses: actions/upload-pages-artifact@v2
157+
with:
158+
path: dist
159+
deploy_pypi_index:
160+
needs: build_pypi_index
161+
permissions:
162+
pages: write
163+
id-token: write
164+
environment:
165+
name: github-pages
166+
url: ${{ steps.deployment.outputs.page_url }}
167+
runs-on: ubuntu-latest
168+
if: github.event_name == 'push' && github.ref == 'refs/heads/devel' && github.repository_owner == 'deepmodeling'
169+
steps:
170+
- name: Deploy to GitHub Pages
171+
id: deployment
172+
uses: actions/deploy-pages@v2
173+
139174
pass:
140175
name: Pass testing build wheels
141176
needs: [build_wheels, build_sdist]

.github/workflows/test_cc.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ jobs:
3030
# TODO: remove ase version when ase has new release
3131
- run: |
3232
python -m pip install -U pip
33-
python -m pip install -e .[cpu,test,lmp] "ase @ https://github.com/rosswhitfield/ase/archive/edd03571aff6944b77b4a4b055239f3c3e4eeb66.zip"
33+
python -m pip install -e .[cpu,test,lmp] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
3434
env:
3535
DP_BUILD_TESTING: 1
3636
- run: pytest --cov=deepmd source/lmp/tests

.github/workflows/test_cuda.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ jobs:
3636
- name: Set PyPI mirror for Aliyun cloud machine
3737
run: python -m pip config --user set global.index-url https://mirrors.aliyun.com/pypi/simple/
3838
- run: python -m pip install -U "pip>=21.3.1,!=23.0.0"
39-
- run: python -m pip install -v -e .[gpu,test,lmp,cu11] "ase @ https://github.com/rosswhitfield/ase/archive/edd03571aff6944b77b4a4b055239f3c3e4eeb66.zip"
39+
- run: python -m pip install -v -e .[gpu,test,lmp,cu11] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz"
4040
env:
4141
DP_BUILD_TESTING: 1
4242
DP_VARIANT: cuda

.pre-commit-config.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,13 +30,13 @@ repos:
3030
exclude: ^source/3rdparty
3131
- repo: https://github.com/astral-sh/ruff-pre-commit
3232
# Ruff version.
33-
rev: v0.0.292
33+
rev: v0.1.1
3434
hooks:
3535
- id: ruff
3636
args: ["--fix"]
3737
exclude: ^source/3rdparty
3838
- repo: https://github.com/psf/black-pre-commit-mirror
39-
rev: 23.9.1
39+
rev: 23.10.0
4040
hooks:
4141
- id: black-jupyter
4242
exclude: ^source/3rdparty
@@ -54,7 +54,7 @@ repos:
5454
- id: blacken-docs
5555
# C++
5656
- repo: https://github.com/pre-commit/mirrors-clang-format
57-
rev: v16.0.6
57+
rev: v17.0.3
5858
hooks:
5959
- id: clang-format
6060
exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc

deepmd/descriptor/se_a.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -469,13 +469,6 @@ def enable_compression(
469469
"empty embedding-net are not supported in model compression!"
470470
)
471471

472-
for ii in range(len(self.filter_neuron) - 1):
473-
if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
474-
raise NotImplementedError(
475-
"Model Compression error: descriptor neuron [%s] is not supported by model compression! "
476-
"The size of the next layer of the neural network must be twice the size of the previous layer."
477-
% ",".join([str(item) for item in self.filter_neuron])
478-
)
479472
if self.stripped_type_embedding:
480473
ret_two_side = get_pattern_nodes_from_graph_def(
481474
graph_def, f"filter_type_all{suffix}/.+_two_side_ebd"

deepmd/descriptor/se_atten.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -387,14 +387,6 @@ def enable_compression(
387387
"empty embedding-net are not supported in model compression!"
388388
)
389389

390-
for ii in range(len(self.filter_neuron) - 1):
391-
if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
392-
raise NotImplementedError(
393-
"Model Compression error: descriptor neuron [%s] is not supported by model compression! "
394-
"The size of the next layer of the neural network must be twice the size of the previous layer."
395-
% ",".join([str(item) for item in self.filter_neuron])
396-
)
397-
398390
if self.attn_layer != 0:
399391
raise RuntimeError("can not compress model when attention layer is not 0.")
400392

deepmd/descriptor/se_r.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -334,14 +334,6 @@ def enable_compression(
334334
not self.filter_resnet_dt
335335
), "Model compression error: descriptor resnet_dt must be false!"
336336

337-
for ii in range(len(self.filter_neuron) - 1):
338-
if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
339-
raise NotImplementedError(
340-
"Model Compression error: descriptor neuron [%s] is not supported by model compression! "
341-
"The size of the next layer of the neural network must be twice the size of the previous layer."
342-
% ",".join([str(item) for item in self.filter_neuron])
343-
)
344-
345337
self.compress = True
346338
self.table = DPTabulate(
347339
self,

deepmd/descriptor/se_t.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -349,14 +349,6 @@ def enable_compression(
349349
not self.filter_resnet_dt
350350
), "Model compression error: descriptor resnet_dt must be false!"
351351

352-
for ii in range(len(self.filter_neuron) - 1):
353-
if self.filter_neuron[ii] * 2 != self.filter_neuron[ii + 1]:
354-
raise NotImplementedError(
355-
"Model Compression error: descriptor neuron [%s] is not supported by model compression! "
356-
"The size of the next layer of the neural network must be twice the size of the previous layer."
357-
% ",".join([str(item) for item in self.filter_neuron])
358-
)
359-
360352
self.compress = True
361353
self.table = DPTabulate(
362354
self,

deepmd/entrypoints/convert.py

Lines changed: 23 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
convert_12_to_21,
66
convert_13_to_21,
77
convert_20_to_21,
8+
convert_pb_to_pbtxt,
89
convert_pbtxt_to_pb,
910
convert_to_21,
1011
)
@@ -17,20 +18,26 @@ def convert(
1718
output_model: str,
1819
**kwargs,
1920
):
20-
if FROM == "auto":
21-
convert_to_21(input_model, output_model)
22-
elif FROM == "0.12":
23-
convert_012_to_21(input_model, output_model)
24-
elif FROM == "1.0":
25-
convert_10_to_21(input_model, output_model)
26-
elif FROM in ["1.1", "1.2"]:
27-
# no difference between 1.1 and 1.2
28-
convert_12_to_21(input_model, output_model)
29-
elif FROM == "1.3":
30-
convert_13_to_21(input_model, output_model)
31-
elif FROM == "2.0":
32-
convert_20_to_21(input_model, output_model)
33-
elif FROM == "pbtxt":
34-
convert_pbtxt_to_pb(input_model, output_model)
21+
if output_model[-6:] == ".pbtxt":
22+
if input_model[-6:] != ".pbtxt":
23+
convert_pb_to_pbtxt(input_model, output_model)
24+
else:
25+
raise RuntimeError("input model is already pbtxt")
3526
else:
36-
raise RuntimeError("unsupported model version " + FROM)
27+
if FROM == "auto":
28+
convert_to_21(input_model, output_model)
29+
elif FROM == "0.12":
30+
convert_012_to_21(input_model, output_model)
31+
elif FROM == "1.0":
32+
convert_10_to_21(input_model, output_model)
33+
elif FROM in ["1.1", "1.2"]:
34+
# no difference between 1.1 and 1.2
35+
convert_12_to_21(input_model, output_model)
36+
elif FROM == "1.3":
37+
convert_13_to_21(input_model, output_model)
38+
elif FROM == "2.0":
39+
convert_20_to_21(input_model, output_model)
40+
elif FROM == "pbtxt":
41+
convert_pbtxt_to_pb(input_model, output_model)
42+
else:
43+
raise RuntimeError("unsupported model version " + FROM)

deepmd/entrypoints/freeze.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -511,9 +511,13 @@ def freeze(
511511
# We import the meta graph and retrieve a Saver
512512
try:
513513
# In case of parallel training
514-
import horovod.tensorflow as _ # noqa: F401
514+
import horovod.tensorflow as HVD
515515
except ImportError:
516516
pass
517+
else:
518+
HVD.init()
519+
if HVD.rank() > 0:
520+
return
517521
saver = tf.train.import_meta_graph(
518522
f"{input_checkpoint}.meta", clear_devices=clear_devices
519523
)

0 commit comments

Comments (0)