Skip to content

Commit d737df0

Browse files
authored
Merge pull request #94 from amcadmus/devel
auto convert input to v1 compatibility
2 parents 4f2e001 + b12467e commit d737df0

File tree

9 files changed

+367
-2
lines changed

9 files changed

+367
-2
lines changed

examples/water/train/water_se_a.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525
"learning_rate" :{
2626
"type": "exp",
27-
"start_lr": 0.005,
27+
"start_lr": 0.001,
2828
"decay_steps": 5000,
2929
"decay_rate": 0.95,
3030
"_comment": "that's all"
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
{
2+
"_comment": " model parameters",
3+
"use_smooth": true,
4+
"sel_a": [46, 92],
5+
"rcut_smth": 5.80,
6+
"rcut": 6.00,
7+
"filter_neuron": [25, 50, 100],
8+
"filter_resnet_dt": false,
9+
"axis_neuron": 16,
10+
"fitting_neuron": [240, 240, 240],
11+
"fitting_resnet_dt":true,
12+
"coord_norm": true,
13+
"type_fitting_net": false,
14+
15+
"_comment": " traing controls",
16+
"systems": ["../data/"],
17+
"set_prefix": "set",
18+
"stop_batch": 1000000,
19+
"batch_size": [1],
20+
"start_lr": 0.001,
21+
"decay_steps": 5000,
22+
"decay_rate": 0.95,
23+
24+
"start_pref_e": 0.02,
25+
"limit_pref_e": 1,
26+
"start_pref_f": 1000,
27+
"limit_pref_f": 1,
28+
"start_pref_v": 0,
29+
"limit_pref_v": 0,
30+
31+
"seed": 1,
32+
33+
"_comment": " display and restart",
34+
"_comment": " frequencies counted in batch",
35+
"disp_file": "lcurve.out",
36+
"disp_freq": 100,
37+
"numb_test": 10,
38+
"save_freq": 1000,
39+
"save_ckpt": "model.ckpt",
40+
"disp_training": true,
41+
"time_training": true,
42+
"profiling": true,
43+
"profiling_file": "timeline.json",
44+
45+
"_comment": "that's all"
46+
}
47+
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
{
2+
"model": {
3+
"descriptor" :{
4+
"type": "se_a",
5+
"sel": [46, 92],
6+
"rcut_smth": 5.80,
7+
"rcut": 6.00,
8+
"neuron": [25, 50, 100],
9+
"axis_neuron": 16,
10+
"resnet_dt": false,
11+
"seed": 1
12+
},
13+
"fitting_net" : {
14+
"neuron": [240, 240, 240],
15+
"resnet_dt": true,
16+
"seed": 1
17+
}
18+
},
19+
20+
"learning_rate" :{
21+
"type": "exp",
22+
"decay_steps": 5000,
23+
"decay_rate": 0.95,
24+
"start_lr": 0.001
25+
},
26+
27+
"loss" :{
28+
"start_pref_e": 0.02,
29+
"limit_pref_e": 1,
30+
"start_pref_f": 1000,
31+
"limit_pref_f": 1,
32+
"start_pref_v": 0,
33+
"limit_pref_v": 0
34+
},
35+
36+
"training" : {
37+
"systems": ["../data/"],
38+
"set_prefix": "set",
39+
"stop_batch": 1000000,
40+
"batch_size": [1],
41+
42+
"seed": 1,
43+
44+
"disp_file": "lcurve.out",
45+
"disp_freq": 100,
46+
"numb_test": 10,
47+
"save_freq": 1000,
48+
"save_ckpt": "model.ckpt",
49+
"disp_training":true,
50+
"time_training":true,
51+
"profiling": true,
52+
"profiling_file":"timeline.json"
53+
}
54+
}
55+
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
{
2+
"with_distrib": false,
3+
"_comment": " model parameters",
4+
"use_smooth": false,
5+
"sel_a": [16, 32],
6+
"sel_r": [30, 60],
7+
"rcut": 6.00,
8+
"axis_rule": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0],
9+
"_comment": " default rule: []",
10+
"_comment": " user defined rule: for each type provides two axes, ",
11+
"_comment": " for each axis: (a_or_r, type, idx)",
12+
"_comment": " if type < 0, exclude type -(type+1)",
13+
"_comment": " for water (O:0, H:1) it can be",
14+
"_comment": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]",
15+
"fitting_neuron": [240, 120, 60, 30, 10],
16+
17+
"_comment": " traing controls",
18+
"systems": ["../data/"],
19+
"set_prefix": "set",
20+
"stop_batch": 1000000,
21+
"batch_size": [4],
22+
"start_lr": 0.001,
23+
"decay_steps": 5000,
24+
"decay_rate": 0.95,
25+
26+
"start_pref_e": 0.02,
27+
"limit_pref_e": 8,
28+
"start_pref_f": 1000,
29+
"limit_pref_f": 1,
30+
"start_pref_v": 0,
31+
"limit_pref_v": 0,
32+
33+
"seed": 1,
34+
35+
"_comment": " display and restart",
36+
"_comment": " frequencies counted in batch",
37+
"disp_file": "lcurve.out",
38+
"disp_freq": 100,
39+
"numb_test": 10,
40+
"save_freq": 1000,
41+
"save_ckpt": "model.ckpt",
42+
"disp_training": true,
43+
"time_training": true,
44+
45+
"_comment": "that's all"
46+
}
47+
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
{
2+
"with_distrib": false,
3+
"model":{
4+
"descriptor": {
5+
"type": "loc_frame",
6+
"sel_a": [16, 32],
7+
"sel_r": [30, 60],
8+
"rcut": 6.00,
9+
"axis_rule": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]
10+
},
11+
"fitting_net": {
12+
"neuron": [240, 120, 60, 30, 10],
13+
"resnet_dt": true,
14+
"seed": 1
15+
}
16+
},
17+
18+
"learning_rate" :{
19+
"type": "exp",
20+
"decay_steps": 5000,
21+
"decay_rate": 0.95,
22+
"start_lr": 0.001
23+
},
24+
25+
"loss" : {
26+
"start_pref_e": 0.02,
27+
"limit_pref_e": 8,
28+
"start_pref_f": 1000,
29+
"limit_pref_f": 1,
30+
"start_pref_v": 0,
31+
"limit_pref_v": 0
32+
},
33+
34+
"training": {
35+
"systems": ["../data/"],
36+
"set_prefix": "set",
37+
"stop_batch": 1000000,
38+
"batch_size": [4],
39+
40+
"seed": 1,
41+
42+
"disp_file": "lcurve.out",
43+
"disp_freq": 100,
44+
"numb_test": 10,
45+
"save_freq": 1000,
46+
"save_ckpt": "model.ckpt",
47+
"disp_training":true,
48+
"time_training":true
49+
}
50+
}
51+
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import os,sys,json
2+
import numpy as np
3+
import unittest
4+
5+
from deepmd.compat import convert_input_v0_v1
6+
7+
class TestConvertInputV0V1 (unittest.TestCase) :
    """Check that v0.x json inputs are converted to the expected v1 layout."""

    def _check_conversion(self, v0_name, v1_name):
        # Load a v0 input and its hand-written v1 reference from the
        # 'compat_inputs' fixture directory, convert, and require equality.
        with open(os.path.join('compat_inputs', v0_name)) as fp:
            jdata0 = json.load(fp)
        with open(os.path.join('compat_inputs', v1_name)) as fp:
            jdata1 = json.load(fp)
        converted = convert_input_v0_v1(jdata0, warning = False, dump = None)
        self.assertEqual(converted, jdata1)

    def test_convert_smth(self):
        # smooth model (se_a descriptor)
        self._check_conversion('water_se_a_v0.json', 'water_se_a_v1.json')

    def test_convert_nonsmth(self):
        # non-smooth model (loc_frame descriptor)
        self._check_conversion('water_v0.json', 'water_v1.json')
23+

source/train/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
configure_file("RunOptions.py.in" "${CMAKE_CURRENT_BINARY_DIR}/RunOptions.py" @ONLY)
44

5-
file(GLOB LIB_PY main.py common.py env.py Network.py Deep*.py Data.py DataSystem.py Model*.py Descrpt*.py Fitting.py Loss.py LearningRate.py Trainer.py TabInter.py ${CMAKE_CURRENT_BINARY_DIR}/RunOptions.py)
5+
file(GLOB LIB_PY main.py common.py env.py compat.py Network.py Deep*.py Data.py DataSystem.py Model*.py Descrpt*.py Fitting.py Loss.py LearningRate.py Trainer.py TabInter.py ${CMAKE_CURRENT_BINARY_DIR}/RunOptions.py)
66

77
file(GLOB CLS_PY Local.py Slurm.py)
88

source/train/compat.py

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
1+
import os,json,warnings
2+
from deepmd.common import j_have,j_must_have,j_must_have_d
3+
4+
def convert_input_v0_v1(jdata, warning = True, dump = None) :
    """Convert a deepmd-kit v0.x input dict to the >=1.0.0 layout.

    Parameters
    ----------
    jdata : dict
        The v0-style input, as loaded from json.
    warning : bool
        If True, warn the user that the input was auto-converted.
    dump : str or None
        If not None, also write the converted input to this file as json.

    Returns
    -------
    dict
        The converted, v1-compatible input.
    """
    output = {}
    # 'with_distrib' stays at the top level in both formats
    if 'with_distrib' in jdata:
        output['with_distrib'] = jdata['with_distrib']
    # v0 selects the descriptor flavor via the 'use_smooth' switch
    if jdata['use_smooth'] :
        output['model'] = _smth_model(jdata)
    else:
        output['model'] = _nonsmth_model(jdata)
    output['learning_rate'] = _learning_rate(jdata)
    output['loss'] = _loss(jdata)
    output['training'] = _training(jdata)
    # bug fix: honor the 'warning' flag; the original warned unconditionally,
    # ignoring warning=False (as passed by the unit tests)
    if warning:
        _warnning_input_v0_v1(dump)
    if dump is not None:
        with open(dump, 'w') as fp:
            json.dump(output, fp, indent=4)
    return output
20+
21+
def _warnning_input_v0_v1(fname):
    """Warn that a v0.x input was auto-converted, optionally naming the dump file."""
    parts = ['It seems that you are using a deepmd-kit input of version 0.x.x, which is deprecated. we have converted the input to >1.0.0 compatible']
    if fname is not None:
        parts.append(', and output it to file ' + fname)
    warnings.warn(''.join(parts))
26+
27+
def _nonsmth_model(jdata):
    """Assemble the v1 'model' section for the non-smooth (loc_frame) case."""
    return {
        'descriptor': _nonsmth_descriptor(jdata),
        'fitting_net': _fitting_net(jdata),
    }
32+
33+
def _smth_model(jdata):
    """Assemble the v1 'model' section for the smooth (se_a) case."""
    return {
        'descriptor': _smth_descriptor(jdata),
        'fitting_net': _fitting_net(jdata),
    }
38+
39+
def _nonsmth_descriptor(jdata) :
    """Build the v1 'descriptor' section for the non-smooth (loc_frame) model.

    Copies the selection, cutoff and axis-rule settings straight from the
    v0 input; the loc_frame descriptor takes no seed.
    """
    # dead code removed: the original also built an unused 'output' dict and
    # computed a 'seed' that was never placed in the descriptor
    descriptor = {}
    descriptor['type'] = 'loc_frame'
    descriptor['sel_a'] = jdata['sel_a']
    descriptor['sel_r'] = jdata['sel_r']
    descriptor['rcut'] = jdata['rcut']
    descriptor['axis_rule'] = jdata['axis_rule']
    return descriptor
52+
53+
def _smth_descriptor(jdata):
    """Build the v1 'descriptor' section for the smooth (se_a) model.

    Maps the flat v0 keys (sel_a, filter_neuron, filter_resnet_dt, ...)
    onto the nested v1 descriptor dict.
    """
    descriptor = {}
    seed = None
    if j_have (jdata, 'seed') :
        seed = jdata['seed']
    descriptor['type'] = 'se_a'
    descriptor['sel'] = jdata['sel_a']
    if j_have(jdata, 'rcut_smth') :
        descriptor['rcut_smth'] = jdata['rcut_smth']
    else :
        # bug fix: the original read descriptor['rcut'] before that key was
        # assigned, raising KeyError whenever 'rcut_smth' was absent; default
        # the smoothing cutoff to the cutoff itself, taken from the input
        descriptor['rcut_smth'] = jdata['rcut']
    descriptor['rcut'] = jdata['rcut']
    descriptor['neuron'] = j_must_have (jdata, 'filter_neuron')
    descriptor['axis_neuron'] = j_must_have_d (jdata, 'axis_neuron', ['n_axis_neuron'])
    descriptor['resnet_dt'] = False
    # bug fix: the original tested for 'resnet_dt' but then read
    # 'filter_resnet_dt' (the actual v0 key, see the example inputs), which
    # would KeyError and meant the v0 flag was silently ignored
    if j_have(jdata, 'filter_resnet_dt') :
        descriptor['resnet_dt'] = jdata['filter_resnet_dt']
    if seed is not None:
        descriptor['seed'] = seed
    return descriptor
73+
74+
def _fitting_net(jdata):
    """Build the v1 'fitting_net' section from a v0 input."""
    fitting_net = {}
    seed = jdata['seed'] if j_have(jdata, 'seed') else None
    fitting_net['neuron'] = j_must_have_d(jdata, 'fitting_neuron', ['n_neuron'])
    # resnet_dt defaults to True; either v0 spelling of the flag overrides
    # it, with 'fitting_resnet_dt' taking precedence (checked last)
    fitting_net['resnet_dt'] = True
    for key in ('resnet_dt', 'fitting_resnet_dt'):
        if j_have(jdata, key):
            fitting_net['resnet_dt'] = jdata[key]
    if seed is not None:
        fitting_net['seed'] = seed
    return fitting_net
88+
89+
def _learning_rate(jdata):
    """Build the v1 'learning_rate' section (exponential decay) from a v0 input."""
    return {
        'type': 'exp',
        'decay_steps': j_must_have(jdata, 'decay_steps'),
        'decay_rate': j_must_have(jdata, 'decay_rate'),
        'start_lr': j_must_have(jdata, 'start_lr'),
    }
97+
98+
def _loss(jdata):
    """Build the v1 'loss' section from a v0 input."""
    loss = {}
    # energy/force/virial preference schedules are mandatory in v0
    for key in ('start_pref_e', 'limit_pref_e',
                'start_pref_f', 'limit_pref_f',
                'start_pref_v', 'limit_pref_v'):
        loss[key] = j_must_have(jdata, key)
    # atomic-energy preferences are optional
    for key in ('start_pref_ae', 'limit_pref_ae'):
        if j_have(jdata, key):
            loss[key] = jdata[key]
    return loss
112+
113+
def _training(jdata):
    """Build the v1 'training' section from a v0 input."""
    training = {}
    seed = jdata['seed'] if j_have(jdata, 'seed') else None
    # data locations and schedule are copied verbatim
    for key in ('systems', 'set_prefix', 'stop_batch', 'batch_size'):
        training[key] = jdata[key]
    if seed is not None:
        training['seed'] = seed
    # 'disp_file' has a default; the remaining display/checkpoint
    # settings are mandatory in the v0 input
    training['disp_file'] = jdata['disp_file'] if j_have(jdata, 'disp_file') else "lcurve.out"
    for key in ('disp_freq', 'numb_test', 'save_freq', 'save_ckpt',
                'disp_training', 'time_training'):
        training[key] = j_must_have(jdata, key)
    if j_have(jdata, 'profiling'):
        training['profiling'] = jdata['profiling']
        # a profiling output file is only required when profiling is enabled
        if training['profiling']:
            training['profiling_file'] = j_must_have(jdata, 'profiling_file')
    return training

0 commit comments

Comments
 (0)