
Commit 33bc642

Merge pull request #149 from amcadmus/devel
Scale and shift the polar predictions, scale the tensor loss; fix a bug in the LAMMPS PPPM interface.
2 parents: 3d74f12 + 286c318

File tree: 6 files changed, +39 −2 lines

source/CMakeLists.txt

Lines changed: 10 additions & 0 deletions
@@ -133,6 +133,16 @@ if (USE_TTM)
   set(TTM_DEF "-DUSE_TTM")
 endif (USE_TTM)
 
+# old pppm interface
+if(NOT DEFINED OLD_LMP_PPPM)
+  set(OLD_LMP_PPPM FALSE)
+endif(NOT DEFINED OLD_LMP_PPPM)
+if (OLD_LMP_PPPM)
+  set(OLD_LMP_PPPM_DEF "-DOLD_LMP_PPPM")
+  message(STATUS "Use old lammps pppm interface")
+endif()
+add_definitions (${OLD_LMP_PPPM_DEF})
+
 # define build type
 if ((NOT DEFINED CMAKE_BUILD_TYPE) OR CMAKE_BUILD_TYPE STREQUAL "")
   set (CMAKE_BUILD_TYPE release)

source/lmp/env.sh.in

Lines changed: 1 addition & 1 deletion
@@ -6,6 +6,6 @@ TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"`
 TF_LIBRARY_PATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -L/g"`
 TF_RPATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -Wl,-rpath=/g"`
 
-NNP_INC=" -std=c++11 @PREC_DEF@ @TTM_DEF@ -I$TF_INCLUDE_DIRS -I$DEEPMD_ROOT/include/deepmd "
+NNP_INC=" -std=c++11 @PREC_DEF@ @TTM_DEF@ @OLD_LMP_PPPM_DEF@ -I$TF_INCLUDE_DIRS -I$DEEPMD_ROOT/include/deepmd "
 NNP_PATH=" -L$TF_LIBRARY_PATH -L$DEEPMD_ROOT/lib"
 NNP_LIB=" -Wl,--no-as-needed -l@LIB_DEEPMD_OP@ -l@LIB_DEEPMD_OP_CUDA@ -l@LIB_DEEPMD@ -ltensorflow_cc -ltensorflow_framework -Wl,-rpath=$TF_RPATH -Wl,-rpath=$DEEPMD_ROOT/lib"

source/lmp/pppm_dplr.cpp

Lines changed: 5 additions & 0 deletions
@@ -40,8 +40,13 @@ enum{FORWARD_IK,FORWARD_AD,FORWARD_IK_PERATOM,FORWARD_AD_PERATOM};
 
 /* ---------------------------------------------------------------------- */
 
+#ifdef OLD_LMP_PPPM
 PPPMDPLR::PPPMDPLR(LAMMPS *lmp, int narg, char **arg) :
   PPPM(lmp, narg, arg)
+#else
+PPPMDPLR::PPPMDPLR(LAMMPS *lmp) :
+  PPPM(lmp)
+#endif
 {
   triclinic_support = 1;
 }

source/lmp/pppm_dplr.h

Lines changed: 4 additions & 0 deletions
@@ -35,7 +35,11 @@ namespace LAMMPS_NS {
 
 class PPPMDPLR : public PPPM {
 public:
+#ifdef OLD_LMP_PPPM
   PPPMDPLR(class LAMMPS *, int, char **);
+#else
+  PPPMDPLR(class LAMMPS *);
+#endif
   virtual ~PPPMDPLR () {};
   void init();
   const vector<double > & get_fele() const {return fele;};

source/train/Fitting.py

Lines changed: 14 additions & 0 deletions
@@ -383,6 +383,8 @@ def __init__ (self, jdata, descrpt) :
             .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\
             .add('resnet_dt', bool, default = True)\
             .add('fit_diag', bool, default = True)\
+            .add('diag_shift', [list,float], default = [0.0 for ii in range(self.ntypes)])\
+            .add('scale', [list,float], default = [1.0 for ii in range(self.ntypes)])\
             .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
             .add('seed', int)
         class_data = args.parse(jdata)
@@ -391,6 +393,14 @@ def __init__ (self, jdata, descrpt) :
         self.sel_type = class_data['sel_type']
         self.fit_diag = class_data['fit_diag']
         self.seed = class_data['seed']
+        self.diag_shift = class_data['diag_shift']
+        self.scale = class_data['scale']
+        if type(self.sel_type) is not list:
+            self.sel_type = [self.sel_type]
+        if type(self.diag_shift) is not list:
+            self.diag_shift = [self.diag_shift]
+        if type(self.scale) is not list:
+            self.scale = [self.scale]
         self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
         self.dim_rot_mat = self.dim_rot_mat_1 * 3
         self.useBN = False
@@ -477,6 +487,10 @@ def build (self,
             final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True)
             # nframes x natoms x 3 x 3
             final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3])
+            # shift and scale
+            sel_type_idx = self.sel_type.index(type_i)
+            final_layer = final_layer * self.scale[sel_type_idx]
+            final_layer = final_layer + self.diag_shift[sel_type_idx] * tf.eye(3, batch_shape=[tf.shape(inputs)[0], natoms[2+type_i]], dtype = global_tf_float_precision)
 
             # concat the results
             if count == 0:
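
For reference, the new shift-and-scale step acts per selected atom type: each predicted 3x3 tensor is multiplied by scale[type], and diag_shift[type] is added to its diagonal, which is what the tf.eye term in the hunk above contributes. A minimal NumPy sketch of the same transform (function and variable names here are illustrative, not part of the repository):

import numpy as np

def shift_and_scale(pred, scale, diag_shift):
    # pred: (nframes, natoms, 3, 3) raw per-atom tensor predictions
    # scale: multiplicative factor for this atom type
    # diag_shift: constant added to the diagonal of each 3x3 tensor
    out = pred * scale
    out = out + diag_shift * np.eye(3)   # np.eye(3) broadcasts over frames and atoms
    return out

pred = np.random.randn(2, 4, 3, 3)       # 2 frames, 4 selected atoms
print(shift_and_scale(pred, scale=1.5, diag_shift=0.5).shape)   # (2, 4, 3, 3)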

source/train/Loss.py

Lines changed: 5 additions & 1 deletion
@@ -284,6 +284,10 @@ def __init__ (self, jdata, **kwarg) :
         self.tensor_size = kwarg['tensor_size']
         self.label_name = kwarg['label_name']
         self.atomic = kwarg.get('atomic', True)
+        if jdata is not None:
+            self.scale = jdata.get('scale', 1.0)
+        else:
+            self.scale = 1.0
         # data required
         add_data_requirement(self.label_name,
                              self.tensor_size,
@@ -300,7 +304,7 @@ def build (self,
                suffix):
         polar_hat = label_dict[self.label_name]
         polar = model_dict[self.tensor_name]
-        l2_loss = tf.reduce_mean( tf.square(polar - polar_hat), name='l2_'+suffix)
+        l2_loss = tf.reduce_mean( tf.square(self.scale*(polar - polar_hat)), name='l2_'+suffix)
         if not self.atomic :
             atom_norm = 1./ global_cvt_2_tf_float(natoms[0])
             l2_loss = l2_loss * atom_norm
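
The loss change only rescales the residual before squaring, so the tensor loss becomes scale**2 times the plain mean-squared error; when no 'scale' key is given (or jdata is None) it defaults to 1.0 and the behaviour is unchanged. A small stand-alone sketch of the equivalent computation (plain NumPy, names are illustrative):

import numpy as np

def scaled_l2(pred, label, scale=1.0):
    # mean over all tensor components of the squared, scaled residual
    return np.mean(np.square(scale * (pred - label)))

pred = np.random.randn(5, 9)     # e.g. 5 atoms, 9 polarizability components
label = np.random.randn(5, 9)
# scaling the residual by 3 multiplies the loss by 9
assert np.isclose(scaled_l2(pred, label, scale=3.0), 9.0 * scaled_l2(pred, label))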
