 import math


-class NCFLayer(nn.Layer):
-    def __init__(self, num_users, num_items, latent_dim, layers):
-        super(NCFLayer, self).__init__()
+class NCF_NeuMF_Layer(nn.Layer):
+    def __init__(self, num_users, num_items, mf_dim, layers):
+        super(NCF_NeuMF_Layer, self).__init__()

         self.num_users = num_users
         self.num_items = num_items
-        self.latent_dim = latent_dim
+        self.mf_dim = mf_dim
         self.layers = layers

         self.MF_Embedding_User = paddle.nn.Embedding(
             self.num_users,
-            self.latent_dim,
-            sparse=True,
-            weight_attr=nn.initializer.Normal(
-                mean=0.0, std=0.01))
+            self.mf_dim,
+            sparse=False,
+            weight_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Normal(
+                    mean=0.0, std=0.01),
+                regularizer=paddle.regularizer.L2Decay(coeff=0)))
         self.MF_Embedding_Item = paddle.nn.Embedding(
             self.num_items,
-            self.latent_dim,
-            sparse=True,
-            weight_attr=nn.initializer.Normal(
-                mean=0.0, std=0.01))
+            self.mf_dim,
+            sparse=False,
+            weight_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Normal(
+                    mean=0.0, std=0.01),
+                regularizer=paddle.regularizer.L2Decay(coeff=0)))
         self.MLP_Embedding_User = paddle.nn.Embedding(
             self.num_users,
             int(self.layers[0] / 2),
-            sparse=True,
-            weight_attr=nn.initializer.Normal(
-                mean=0.0, std=0.01))
+            sparse=False,
+            weight_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Normal(
+                    mean=0.0, std=0.01),
+                regularizer=paddle.regularizer.L2Decay(coeff=0)))
         self.MLP_Embedding_Item = paddle.nn.Embedding(
             self.num_items,
             int(self.layers[0] / 2),
-            sparse=True,
-            weight_attr=nn.initializer.Normal(
-                mean=0.0, std=0.01))
+            sparse=False,
+            weight_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Normal(
+                    mean=0.0, std=0.01),
+                regularizer=paddle.regularizer.L2Decay(coeff=0)))

         num_layer = len(self.layers)
         self.MLP_fc = []
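
Note on the embedding changes in the hunk above: the bare Normal initializer passed as weight_attr is replaced by a paddle.ParamAttr that bundles the same initializer with an explicit L2Decay regularizer (coeff=0, so no decay is actually applied, only the hook is wired in), and sparse gradient updates are switched off. A minimal side-by-side sketch, purely illustrative (sizes made up, Paddle 2.x API assumed):

import paddle
import paddle.nn as nn

num_items, dim = 1000, 8  # illustrative values, not the repo's config

# Old style: bare initializer as weight_attr, sparse gradient updates.
emb_old = paddle.nn.Embedding(
    num_items, dim, sparse=True,
    weight_attr=nn.initializer.Normal(mean=0.0, std=0.01))

# New style: ParamAttr carries both the initializer and a per-parameter
# L2Decay regularizer; coeff=0.0 means zero weight decay in practice.
emb_new = paddle.nn.Embedding(
    num_items, dim, sparse=False,
    weight_attr=paddle.ParamAttr(
        initializer=nn.initializer.Normal(mean=0.0, std=0.01),
        regularizer=paddle.regularizer.L2Decay(coeff=0.0)))
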
@@ -62,7 +70,7 @@ def __init__(self, num_users, num_items, latent_dim, layers):
                 weight_attr=paddle.ParamAttr(
                     initializer=nn.initializer.TruncatedNormal(
                         mean=0.0, std=1.0 / math.sqrt(self.layers[i - 1])),
-                    regularizer=paddle.regularizer.L2Decay(coeff=1e-4)),
+                    regularizer=paddle.regularizer.L2Decay(coeff=0)),
                 name='layer_' + str(i))
             self.add_sublayer('layer_%d' % i, Linear)
             self.MLP_fc.append(Linear)
@@ -73,7 +81,8 @@ def __init__(self, num_users, num_items, latent_dim, layers):
         self.prediction = paddle.nn.Linear(
             in_features=self.layers[2],
             out_features=1,
-            weight_attr=nn.initializer.KaimingUniform(fan_in=None),
+            weight_attr=nn.initializer.KaimingUniform(fan_in=self.layers[2] *
+                                                      2),
             name='prediction')
         self.sigmoid = paddle.nn.Sigmoid()

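
The prediction layer's initializer also changes: KaimingUniform with fan_in=None lets Paddle infer the fan-in from the shape of the weight it initializes, while passing a number fixes the fan-in (and hence the bound of the uniform distribution) by hand; here it is pinned to self.layers[2] * 2. A tiny sketch of the two forms, values illustrative only:

import paddle.nn as nn

width = 16  # stand-in for self.layers[2]

init_inferred = nn.initializer.KaimingUniform(fan_in=None)       # fan-in taken from the weight shape
init_explicit = nn.initializer.KaimingUniform(fan_in=width * 2)  # fan-in fixed explicitly
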
@@ -112,3 +121,121 @@ def forward(self, input_data):
         prediction = self.prediction(predict_vector)
         prediction = self.sigmoid(prediction)
         return prediction
+
+
+class NCF_GMF_Layer(nn.Layer):
+    def __init__(self, num_users, num_items, mf_dim, layers):
+        super(NCF_GMF_Layer, self).__init__()
+
+        self.num_users = num_users
+        self.num_items = num_items
+        self.mf_dim = mf_dim
+        self.layers = layers
+
+        self.MF_Embedding_User = paddle.nn.Embedding(
+            self.num_users,
+            self.mf_dim,
+            sparse=True,
+            weight_attr=nn.initializer.Normal(
+                mean=0.0, std=0.01))
+
+        self.MF_Embedding_Item = paddle.nn.Embedding(
+            self.num_items,
+            self.mf_dim,
+            sparse=True,
+            weight_attr=nn.initializer.Normal(
+                mean=0.0, std=0.01))
+
+        self.prediction = paddle.nn.Linear(
+            in_features=self.layers[3],
+            out_features=1,
+            weight_attr=nn.initializer.KaimingUniform(fan_in=None),
+            name='prediction')
+
+        self.sigmoid = paddle.nn.Sigmoid()
+
+    def forward(self, input_data):
+
+        user_input = input_data[0]
+        item_input = input_data[1]
+        label = input_data[2]
+
+        user_embedding_mf = self.MF_Embedding_User(user_input)
+        mf_user_latent = paddle.flatten(
+            x=user_embedding_mf, start_axis=1, stop_axis=2)
+        item_embedding_mf = self.MF_Embedding_Item(item_input)
+        mf_item_latent = paddle.flatten(
+            x=item_embedding_mf, start_axis=1, stop_axis=2)
+        mf_vector = paddle.multiply(mf_user_latent, mf_item_latent)
+        prediction = self.prediction(mf_vector)
+        prediction = self.sigmoid(prediction)
+        return prediction
+
+
+class NCF_MLP_Layer(nn.Layer):
+    def __init__(self, num_users, num_items, mf_dim, layers):
+        super(NCF_MLP_Layer, self).__init__()
+
+        self.num_users = num_users
+        self.num_items = num_items
+        self.mf_dim = mf_dim
+        self.layers = layers
+
+        self.MLP_Embedding_User = paddle.nn.Embedding(
+            self.num_users,
+            int(self.layers[0] / 2),
+            sparse=True,
+            weight_attr=nn.initializer.Normal(
+                mean=0.0, std=0.01))
+        self.MLP_Embedding_Item = paddle.nn.Embedding(
+            self.num_items,
+            int(self.layers[0] / 2),
+            sparse=True,
+            weight_attr=nn.initializer.Normal(
+                mean=0.0, std=0.01))
+
+        num_layer = len(self.layers)
+        self.MLP_fc = []
+        for i in range(1, num_layer):
+            Linear = paddle.nn.Linear(
+                in_features=self.layers[i - 1],
+                out_features=self.layers[i],
+                weight_attr=paddle.ParamAttr(
+                    initializer=nn.initializer.TruncatedNormal(
+                        mean=0.0, std=1.0 / math.sqrt(self.layers[i - 1]))),
+                name='layer_' + str(i))
+            self.add_sublayer('layer_%d' % i, Linear)
+            self.MLP_fc.append(Linear)
+            act = paddle.nn.ReLU()
+            self.add_sublayer('act_%d' % i, act)
+            self.MLP_fc.append(act)
+
+        self.prediction = paddle.nn.Linear(
+            in_features=self.layers[3],
+            out_features=1,
+            weight_attr=nn.initializer.KaimingUniform(fan_in=self.layers[3] *
+                                                      2),
+            name='prediction')
+
+        self.sigmoid = paddle.nn.Sigmoid()
+
+    def forward(self, input_data):
+        user_input = input_data[0]
+        item_input = input_data[1]
+        label = input_data[2]
+
+        user_embedding_mlp = self.MLP_Embedding_User(user_input)
+        mlp_user_latent = paddle.flatten(
+            x=user_embedding_mlp, start_axis=1, stop_axis=2)
+        item_embedding_mlp = self.MLP_Embedding_Item(item_input)
+        mlp_item_latent = paddle.flatten(
+            x=item_embedding_mlp, start_axis=1, stop_axis=2)
+        mlp_vector = paddle.concat(
+            x=[mlp_user_latent, mlp_item_latent], axis=-1)
+
+        for n_layer in self.MLP_fc:
+            mlp_vector = n_layer(mlp_vector)
+
+        prediction = self.prediction(mlp_vector)
+        prediction = self.sigmoid(prediction)
+        return prediction
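
For context, a rough smoke test of the three layers touched by this diff. Everything here is an assumption for illustration: the hyperparameters are made up (the repo's real values come from its config; the sketch needs mf_dim == layers[-1] and mf_dim + layers[-1] == layers[2], which holds for layers=[64, 32, 16, 8] with mf_dim=8), and NCF_NeuMF_Layer.forward is assumed to take the same [user, item, label] list as the other two, since only its tail is visible in this hunk. It also assumes the file's usual prelude (import paddle; import paddle.nn as nn) and these class definitions are in scope.

import paddle

num_users, num_items = 100, 200        # made-up sizes
mf_dim, layers = 8, [64, 32, 16, 8]    # assumed config

user = paddle.randint(0, num_users, shape=[4, 1], dtype='int64')
item = paddle.randint(0, num_items, shape=[4, 1], dtype='int64')
label = paddle.ones(shape=[4, 1], dtype='float32')  # unused by the GMF/MLP forward passes

for cls in (NCF_NeuMF_Layer, NCF_GMF_Layer, NCF_MLP_Layer):
    model = cls(num_users, num_items, mf_dim, layers)
    pred = model([user, item, label])   # sigmoid score per example
    print(cls.__name__, pred.shape)     # expected: [4, 1]
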