 import numpy as np
 
 from deepmd.env import tf
-from deepmd.common import ClassArg, add_data_requirement, get_activation_func
+from deepmd.common import ClassArg, add_data_requirement, get_activation_func, get_precision_func
 from deepmd.Network import one_layer
 from deepmd.DescrptLocFrame import DescrptLocFrame
 from deepmd.DescrptSeA import DescrptSeA
@@ -22,7 +22,8 @@ def __init__ (self, jdata, descrpt):
                .add('rcond',     float, default = 1e-3) \
                .add('seed',      int) \
                .add('atom_ener', list,  default = [])\
-               .add("activation_function", str, default = "tanh")
+               .add("activation_function", str, default = "tanh")\
+               .add("precision", int,   default = 0)
         class_data = args.parse(jdata)
         self.numb_fparam = class_data['numb_fparam']
         self.numb_aparam = class_data['numb_aparam']
@@ -31,6 +32,7 @@ def __init__ (self, jdata, descrpt):
         self.rcond = class_data['rcond']
         self.seed = class_data['seed']
         self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
+        self.fitting_precision = get_precision_func(class_data['precision'])
         self.atom_ener = []
         for at, ae in enumerate(class_data['atom_ener']) :
             if ae is not None :
@@ -162,7 +164,7 @@ def build (self,
                                             initializer = tf.constant_initializer(self.aparam_inv_std))
 
         start_index = 0
-        inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]])
+        inputs = tf.cast(tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
 
         if bias_atom_e is not None :
             assert(len(bias_atom_e) == self.ntypes)
@@ -203,10 +205,10 @@
 
             for ii in range(0,len(self.n_neuron)) :
                 if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn)
+                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                 else :
-                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed)
-            final_layer = one_layer(layer, 1, activation_fn = None, bavg = type_bias_ae, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed)
+                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, precision = self.fitting_precision)
+            final_layer = one_layer(layer, 1, activation_fn = None, bavg = type_bias_ae, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, precision = self.fitting_precision)
 
             if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None :
                 inputs_zero = tf.zeros_like(inputs_i, dtype = global_tf_float_precision)
@@ -217,10 +219,10 @@
                     layer = tf.concat([layer, ext_aparam], axis = 1)
                 for ii in range(0,len(self.n_neuron)) :
                     if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                        layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = True, seed = self.seed, use_timestep = self.resnet_dt)
+                        layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = True, seed = self.seed, use_timestep = self.resnet_dt, precision = self.fitting_precision)
                     else :
-                        layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = True, seed = self.seed)
-                zero_layer = one_layer(layer, 1, activation_fn = None, bavg = type_bias_ae, name = 'final_layer_type_'+str(type_i)+suffix, reuse = True, seed = self.seed)
+                        layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = True, seed = self.seed, precision = self.fitting_precision)
+                zero_layer = one_layer(layer, 1, activation_fn = None, bavg = type_bias_ae, name = 'final_layer_type_'+str(type_i)+suffix, reuse = True, seed = self.seed, precision = self.fitting_precision)
                 final_layer += self.atom_ener[type_i] - zero_layer
 
             final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i]])
@@ -231,7 +233,7 @@
             else :
                 outs = tf.concat([outs, final_layer], axis = 1)
 
-        return tf.reshape(outs, [-1])
+        return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision)
 
 
 class WFCFitting () :
@@ -246,14 +248,16 @@ def __init__ (self, jdata, descrpt) :
                .add('wfc_numb', int, must = True)\
                .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'wfc_type')\
                .add('seed', int)\
-               .add("activation_function", str, default = "tanh")
+               .add("activation_function", str, default = "tanh")\
+               .add('precision', int, default = 0)
         class_data = args.parse(jdata)
         self.n_neuron = class_data['neuron']
         self.resnet_dt = class_data['resnet_dt']
         self.wfc_numb = class_data['wfc_numb']
         self.sel_type = class_data['sel_type']
         self.seed = class_data['seed']
         self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
+        self.fitting_precision = get_precision_func(class_data['precision'])
         self.useBN = False
 
 
@@ -273,7 +277,7 @@ def build (self,
                reuse = None,
                suffix = '') :
         start_index = 0
-        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
+        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
         rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])
 
         count = 0
@@ -293,11 +297,11 @@ def build (self,
             layer = inputs_i
             for ii in range(0,len(self.n_neuron)) :
                 if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn)
+                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                 else :
-                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn)
+                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
             # (nframes x natoms) x (nwfc x 3)
-            final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed)
+            final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, precision = self.fitting_precision)
             # (nframes x natoms) x nwfc(wc) x 3(coord_local)
             final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.wfc_numb, 3])
             # (nframes x natoms) x nwfc(wc) x 3(coord)
@@ -312,7 +316,7 @@
                 outs = tf.concat([outs, final_layer], axis = 1)
             count += 1
 
-        return tf.reshape(outs, [-1])
+        return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision)
 
 
 
@@ -327,13 +331,15 @@ def __init__ (self, jdata, descrpt) :
                .add('resnet_dt', bool, default = True)\
                .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
                .add('seed', int)\
-               .add("activation_function", str, default = "tanh")
+               .add("activation_function", str, default = "tanh")\
+               .add('precision', int, default = 0)
         class_data = args.parse(jdata)
         self.n_neuron = class_data['neuron']
         self.resnet_dt = class_data['resnet_dt']
         self.sel_type = class_data['sel_type']
         self.seed = class_data['seed']
         self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
+        self.fitting_precision = get_precision_func(class_data['precision'])
         self.useBN = False
 
     def get_sel_type(self):
@@ -349,7 +355,7 @@ def build (self,
                reuse = None,
                suffix = '') :
         start_index = 0
-        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
+        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
         rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]])
 
         count = 0
@@ -369,11 +375,11 @@ def build (self,
             layer = inputs_i
             for ii in range(0,len(self.n_neuron)) :
                 if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn)
+                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                 else :
-                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn)
+                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
             # (nframes x natoms) x 9
-            final_layer = one_layer(layer, 9, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed)
+            final_layer = one_layer(layer, 9, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, precision = self.fitting_precision)
             # (nframes x natoms) x 3 x 3
             final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 3, 3])
             # (nframes x natoms) x 3 x 3
@@ -392,7 +398,7 @@
                 outs = tf.concat([outs, final_layer], axis = 1)
             count += 1
 
-        return tf.reshape(outs, [-1])
+        return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision)
 
 
 class PolarFittingSeA () :
@@ -409,7 +415,8 @@ def __init__ (self, jdata, descrpt) :
                .add('scale', [list,float], default = [1.0 for ii in range(self.ntypes)])\
                .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
                .add('seed', int)\
-               .add("activation_function", str, default = "tanh")
+               .add("activation_function", str, default = "tanh")\
+               .add('precision', int, default = 0)
         class_data = args.parse(jdata)
         self.n_neuron = class_data['neuron']
         self.resnet_dt = class_data['resnet_dt']
@@ -419,6 +426,7 @@ def __init__ (self, jdata, descrpt) :
         self.diag_shift = class_data['diag_shift']
         self.scale = class_data['scale']
         self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
+        self.fitting_precision = get_precision_func(class_data['precision'])
         if type(self.sel_type) is not list :
             self.sel_type = [self.sel_type]
         if type(self.diag_shift) is not list :
@@ -459,7 +467,7 @@ def build (self,
                reuse = None,
                suffix = '') :
         start_index = 0
-        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
+        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
         rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
 
         count = 0
@@ -479,16 +487,16 @@
             layer = inputs_i
             for ii in range(0,len(self.n_neuron)) :
                 if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn)
+                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                 else :
-                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn)
+                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
             if self.fit_diag :
                 bavg = np.zeros(self.dim_rot_mat_1)
                 # bavg[0] = self.avgeig[0]
                 # bavg[1] = self.avgeig[1]
                 # bavg[2] = self.avgeig[2]
                 # (nframes x natoms) x naxis
-                final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, bavg = bavg)
+                final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision)
                 # (nframes x natoms) x naxis
                 final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1])
                 # (nframes x natoms) x naxis x naxis
@@ -499,7 +507,7 @@ def build (self,
                 # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
                 # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
                 # (nframes x natoms) x (naxis x naxis)
-                final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, bavg = bavg)
+                final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision)
                 # (nframes x natoms) x naxis x naxis
                 final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1])
                 # (nframes x natoms) x naxis x naxis
@@ -522,7 +530,7 @@
                 outs = tf.concat([outs, final_layer], axis = 1)
             count += 1
 
-        return tf.reshape(outs, [-1])
+        return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision)
 
 
 class GlobalPolarFittingSeA () :
@@ -564,13 +572,15 @@ def __init__ (self, jdata, descrpt) :
                .add('resnet_dt', bool, default = True)\
                .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'dipole_type')\
                .add('seed', int)\
-               .add("activation_function", str, default = "tanh")
+               .add("activation_function", str, default = "tanh")\
+               .add('precision', int, default = 0)
         class_data = args.parse(jdata)
         self.n_neuron = class_data['neuron']
         self.resnet_dt = class_data['resnet_dt']
         self.sel_type = class_data['sel_type']
         self.seed = class_data['seed']
         self.fitting_activation_fn = get_activation_func(class_data["activation_function"])
+        self.fitting_precision = get_precision_func(class_data['precision'])
         self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
         self.dim_rot_mat = self.dim_rot_mat_1 * 3
         self.useBN = False
@@ -588,7 +598,7 @@ def build (self,
                reuse = None,
                suffix = '') :
         start_index = 0
-        inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]])
+        inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
         rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
 
         count = 0
@@ -608,11 +618,11 @@ def build (self,
             layer = inputs_i
             for ii in range(0,len(self.n_neuron)) :
                 if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
-                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn)
+                    layer += one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
                 else :
-                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn)
+                    layer = one_layer(layer, self.n_neuron[ii], name = 'layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision)
             # (nframes x natoms) x naxis
-            final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed)
+            final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name = 'final_layer_type_'+str(type_i)+suffix, reuse = reuse, seed = self.seed, precision = self.fitting_precision)
             # (nframes x natoms) x 1 * naxis
             final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 1, self.dim_rot_mat_1])
             # (nframes x natoms) x 1 x 3(coord)
@@ -627,5 +637,5 @@
                 outs = tf.concat([outs, final_layer], axis = 1)
             count += 1
 
-        return tf.reshape(outs, [-1])
+        return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision)
         # return tf.reshape(outs, [tf.shape(inputs)[0] * natoms[0] * 3 // 3])
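For readers skimming the diff: every fitting class now parses a precision key (an int, default 0), converts it to a TensorFlow dtype via get_precision_func, casts the descriptor input and the flattened output with tf.cast, and forwards precision = self.fitting_precision into each one_layer call. The snippet below is a minimal, hypothetical sketch of such a selector for illustration only; the integer codes and the dtype mapping are assumptions, not deepmd-kit's actual get_precision_func.

import tensorflow as tf

def example_precision_func(code):
    # Hypothetical mapping from the integer "precision" key to a TF dtype;
    # 0 is treated here as the double-precision default.
    mapping = {0: tf.float64, 1: tf.float32, 2: tf.float16}
    if code not in mapping:
        raise ValueError('unsupported precision code: %s' % code)
    return mapping[code]

# Usage in the same spirit as the diff: obtain the dtype once in __init__,
# then cast inputs/outputs and pass it to every hidden layer of the fitting net.
fitting_precision = example_precision_func(0)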