 # 'autoint_nets', 'fg_nets', 'fgcnn_cin_nets', 'fgcnn_fm_nets', 'fgcnn_ipnn_nets',
 # 'fgcnn_dnn_nets', 'fibi_nets', 'fibi_dnn_nets']
 
+def _concat_embeddings(embeddings, concat_layer_name):
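+    """Concatenate embedding tensors along axis=1; return the single tensor when only one is given, or None when embeddings is empty or None."""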
+    if embeddings is not None:
+        len_embeddings = len(embeddings)
+        if len_embeddings > 1:
+            return Concatenate(axis=1, name=concat_layer_name)(embeddings)
+        elif len_embeddings == 1:
+            return embeddings[0]
+        else:
+            return None
+    else:
+        return None
+
 
 def linear(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config, model_desc):
     """
     Linear(order-1) interactions
     """
     x = None
     x_emb = None
-    if embeddings is not None and len(embeddings) > 1:
-        concat_embeddings = Concatenate(axis=1, name='concat_linear_embedding')(embeddings)
+    concat_embeddings = _concat_embeddings(embeddings, 'concat_linear_embedding')
+    if concat_embeddings is not None:
         x_emb = tf.reduce_sum(concat_embeddings, axis=-1, name='linear_reduce_sum')
+    else:
+        return None
 
     if x_emb is not None and dense_layer is not None:
         x = Concatenate(name='concat_linear_emb_dense')([x_emb, dense_layer])
@@ -56,10 +70,10 @@ def cin_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, confi
     is measured explicitly; (3) the complexity of network will not grow exponentially with the degree
     of interactions.
     """
-    if embeddings is None or len(embeddings) <= 1:
+    cin_concat = _concat_embeddings(embeddings, 'concat_cin_embedding')
+    if cin_concat is None:
         model_desc.add_net('cin', (None), (None))
         return None
-    cin_concat = Concatenate(axis=1, name='concat_cin_embedding')(embeddings)
     cin_output = layers.CIN(params=config.cin_params)(cin_concat)
     model_desc.add_net('cin', cin_concat.shape, cin_output.shape)
     return cin_output
@@ -69,20 +83,22 @@ def fm_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config
6983 """
7084 FM models pairwise(order-2) feature interactions
7185 """
72- if embeddings is None or len (embeddings ) <= 1 :
86+ concat_embeddings_layer = _concat_embeddings (embeddings , 'concat_fm_embedding' )
87+
88+ if concat_embeddings_layer is None :
7389 model_desc .add_net ('fm' , (None ), (None ))
7490 return None
75- fm_concat = Concatenate (axis = 1 , name = 'concat_fm_embedding' )(embeddings )
76- fm_output = layers .FM (name = 'fm_layer' )(fm_concat )
77- model_desc .add_net ('fm' , fm_concat .shape , fm_output .shape )
91+ # fm_concat = Concatenate(axis=1, name='concat_fm_embedding')(embeddings)
92+ fm_output = layers .FM (name = 'fm_layer' )(concat_embeddings_layer )
93+ model_desc .add_net ('fm' , concat_embeddings_layer .shape , fm_output .shape )
7894 return fm_output
7995
8096
8197def afm_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc ):
8298 """Attentional Factorization Machine (AFM), which learns the importance of each feature interaction
8399 from datasets via a neural attention network.
84100 """
85- if embeddings is None or len (embeddings ) <= 1 :
101+ if embeddings is None or len (embeddings ) < 2 : # Need at last 2 embedding
86102 return None
87103 afm_output = layers .AFM (params = config .afm_params , name = 'afm_layer' )(embeddings )
88104 model_desc .add_net ('afm' , f'list({ len (embeddings )} )' , afm_output .shape )
@@ -94,7 +110,7 @@ def opnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, conf
     Outer Product-based Neural Network
     OuterProduct+DNN
     """
-    if embeddings is None or len(embeddings) <= 1:
+    if embeddings is None or len(embeddings) < 2:  # at least 2 embeddings are needed to build `layers.OuterProduct`
         return None
 
     op = layers.OuterProduct(config.pnn_params, name='outer_product_layer')(embeddings)
@@ -111,7 +127,7 @@ def ipnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, conf
     Inner Product-based Neural Network
     InnerProduct+DNN
     """
-    if embeddings is None or len(embeddings) <= 1:
+    if embeddings is None or len(embeddings) < 2:  # at least 2 embeddings are needed to build `layers.InnerProduct`
         return None
 
     ip = layers.InnerProduct(name='inner_product_layer')(embeddings)
@@ -127,7 +143,7 @@ def pnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, confi
127143 """
128144 Concatenation of inner product and outer product + DNN
129145 """
130- if embeddings is None or len (embeddings ) <= 1 :
146+ if embeddings is None or len (embeddings ) < 2 : # at least 2 embedding to build `layers.InnerProduct`
131147 return None
132148
133149 ip = layers .InnerProduct (name = 'pnn_inner_product_layer' )(embeddings )
@@ -193,15 +209,16 @@ def autoint_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, c
193209 """
194210 AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks.
195211 """
196- if embeddings is None or len (embeddings ) <= 1 :
212+ concat_embeddings_layer = _concat_embeddings (embeddings , 'concat_autoint_embedding' )
213+ if concat_embeddings_layer is None :
197214 model_desc .add_net ('autoint' , (None ), (None ))
198215 return None
199- autoint_emb_concat = Concatenate (axis = 1 , name = 'concat_autoint_embedding' )(embeddings )
200- output = autoint_emb_concat
216+ # autoint_emb_concat = Concatenate(axis=1, name='concat_autoint_embedding')(embeddings)
217+ output = concat_embeddings_layer
201218 for i in range (config .autoint_params ['num_attention' ]):
202219 output = layers .MultiheadAttention (params = config .autoint_params )(output )
203220 output = Flatten ()(output )
204- model_desc .add_net ('autoint' , autoint_emb_concat .shape , output .shape )
221+ model_desc .add_net ('autoint' , concat_embeddings_layer .shape , output .shape )
205222 return output
206223
207224
@@ -215,12 +232,13 @@ def fg_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config
     .. [1] `Liu B, Tang R, Chen Y, et al. Feature generation by convolutional neural network
     for click-through rate prediction[C]//The World Wide Web Conference. 2019: 1119-1129.`
     """
-    if embeddings is None or len(embeddings) <= 1:
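+    # fg_nets is reused by the fgcnn_* heads, so suffix the concat layer name with a counter to keep layer names unique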
+    fgcnn_emb_concat_index = counter.next_num('concat_fgcnn_embedding')
+    fgcnn_emb_concat = _concat_embeddings(embeddings, f'concat_fgcnn_embedding_{fgcnn_emb_concat_index}')
+
+    if fgcnn_emb_concat is None:
         model_desc.add_net('fgcnn', (None), (None))
         return None
-
-    fgcnn_emb_concat_index = counter.next_num('concat_fgcnn_embedding')
-    fgcnn_emb_concat = Concatenate(axis=1, name=f'concat_fgcnn_embedding_{fgcnn_emb_concat_index}')(embeddings)
+    # fgcnn_emb_concat = Concatenate(axis=1, name=f'concat_fgcnn_embedding_{fgcnn_emb_concat_index}')(embeddings)
     fg_inputs = tf.expand_dims(fgcnn_emb_concat, axis=-1)
     fg_filters = config.fgcnn_params.get('fg_filters', (14, 16))
     fg_heights = config.fgcnn_params.get('fg_heights', (7, 7))
@@ -244,10 +262,11 @@ def fgcnn_cin_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense,
244262 """
245263 FGCNN with CIN as deep classifier
246264 """
247- if embeddings is None or len (embeddings ) <= 1 :
265+ fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
266+
267+ if fg_output is None :
248268 return None
249269
250- fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
251270 cin_output = layers .CIN (params = config .cin_params )(fg_output )
252271 model_desc .add_net ('fgcnn-cin' , fg_output .shape , cin_output .shape )
253272 return cin_output
@@ -257,10 +276,11 @@ def fgcnn_fm_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense,
257276 """
258277 FGCNN with FM as deep classifier
259278 """
260- if embeddings is None or len (embeddings ) <= 1 :
261- return None
262279
263280 fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
281+ if fg_output is None :
282+ return None
283+
264284 fm_output = layers .FM (name = 'fm_fgcnn_layer' )(fg_output )
265285 model_desc .add_net ('fgcnn-fm' , fg_output .shape , fm_output .shape )
266286 return fm_output
@@ -271,6 +291,9 @@ def fgcnn_afm_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense,
     FGCNN with AFM as deep classifier
     """
     fg_output = fg_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config, model_desc)
+    if fg_output is None:
+        return None
+
     split_features = tf.split(fg_output, fg_output.shape[1], axis=1)
     afm_output = layers.AFM(params=config.afm_params)(split_features)
     model_desc.add_net('fgcnn-afm', fg_output.shape, afm_output.shape)
@@ -281,14 +304,16 @@ def fgcnn_ipnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense
281304 """
282305 FGCNN with IPNN as deep classifier
283306 """
284- if embeddings is None or len (embeddings ) <= 1 :
285- return None
286-
287307 fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
308+ if fg_output is None :
309+ return None
288310 split_features = tf .split (fg_output , fg_output .shape [1 ], axis = 1 )
289311 inner_product = layers .InnerProduct ()(split_features )
290312 # dnn_input = Flatten()(concat_all_features)
291- dnn_input = Concatenate ()([Flatten ()(fg_output ), inner_product , dense_layer ])
313+ dnn_input_layers = [Flatten ()(fg_output ), inner_product ]
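+    # dense_layer may be None, so only append it to the DNN input when it is present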
+    if dense_layer is not None:
+        dnn_input_layers.append(dense_layer)
+    dnn_input = Concatenate()(dnn_input_layers)
     dnn_out = dnn(dnn_input, config.dnn_params, cellname='fgcnn_ipnn')
     model_desc.add_net('fgcnn-ipnn', fg_output.shape, dnn_out.shape)
     return dnn_out
@@ -298,11 +323,15 @@ def fgcnn_dnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense,
298323 """
299324 FGCNN with DNN as deep classifier
300325 """
301- if embeddings is None or len (embeddings ) <= 1 :
326+ fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
327+ if fg_output is None :
302328 return None
303329
304- fg_output = fg_nets (embeddings , flatten_emb_layer , dense_layer , concat_emb_dense , config , model_desc )
305- dnn_input = Concatenate ()([Flatten ()(fg_output ), dense_layer ])
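+    # dense_layer may be None; concatenate it with the flattened FGCNN output only when it is present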
+    if dense_layer is not None:
+        dnn_input = Concatenate()([Flatten()(fg_output), dense_layer])
+    else:
+        dnn_input = Flatten()(fg_output)
+
     dnn_out = dnn(dnn_input, config.dnn_params, cellname='fgcnn_dnn')
     model_desc.add_net('fgcnn-ipnn', fg_output.shape, dnn_out.shape)
     return dnn_out
@@ -316,14 +345,12 @@ def fibi_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, conf
     these cross features are concatenated by a combination layer which merges the outputs of
     Bilinear-Interaction layer.
     """
-    if embeddings is None or len(embeddings) <= 1:
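+    # suffix the SENET concat layer name with a counter so repeated builds keep layer names unique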
+    senet_index = counter.next_num('senet_layer')
+    senet_emb_concat = _concat_embeddings(embeddings, f'concat_senet_embedding_{senet_index}')
+    if senet_emb_concat is None:
         model_desc.add_net('fibi', (None), (None))
         return None
 
-    senet_index = counter.next_num('senet_layer')
-
-    senet_emb_concat = Concatenate(axis=1, name=f'concat_senet_embedding_{senet_index}')(embeddings)
-
     senet_pooling_op = config.fibinet_params.get('senet_pooling_op', 'mean')
     senet_reduction_ratio = config.fibinet_params.get('senet_reduction_ratio', 3)
     bilinear_type = config.fibinet_params.get('bilinear_type', 'field_interaction')