from ...layers.utils import concat_func, add_func, combined_dnn_input, reduce_sum
-def DeepFEFMEstimator(linear_feature_columns, dnn_feature_columns, embedding_size=48,
-                      dnn_hidden_units=(1024, 1024, 1024), l2_reg_linear=0.000001, l2_reg_embedding_feat=0.00001,
-                      l2_reg_embedding_field=0.0000001, l2_reg_dnn=0, seed=1024, dnn_dropout=0.2,
+def DeepFEFMEstimator(linear_feature_columns, dnn_feature_columns,
+                      dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding_feat=0.00001,
+                      l2_reg_embedding_field=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0.0,
                      dnn_activation='relu', dnn_use_bn=False, task='binary', model_dir=None,
                      config=None, linear_optimizer='Ftrl', dnn_optimizer='Adagrad', training_chief_hooks=None):
"""Instantiates the DeepFEFM Network architecture or the shallow FEFM architecture (Ablation support not provided
27
27
as estimator is meant for production, Ablation support provided in DeepFEFM implementation in models
28
28
29
29
    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
-    :param embedding_size: positive integer,sparse feature embedding_size
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding_feat: float. L2 regularizer strength applied to embedding vector of features
@@ -62,10 +61,11 @@ def _model_fn(features, labels, mode, config):
        sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                              l2_reg_embedding=l2_reg_embedding_feat)

-        fefm_interaction_embedding = FEFMLayer(num_fields=len(sparse_embedding_list), embedding_size=embedding_size,
-                                               regularizer=l2_reg_embedding_field)(concat_func(sparse_embedding_list, axis=1))
+        fefm_interaction_embedding = FEFMLayer(
+            regularizer=l2_reg_embedding_field)(concat_func(sparse_embedding_list, axis=1))

-        fefm_logit = tf.keras.layers.Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=True))(fefm_interaction_embedding)
+        fefm_logit = tf.keras.layers.Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=True))(
+            fefm_interaction_embedding)

        final_logit_components.append(fefm_logit)

@@ -87,6 +87,3 @@ def _model_fn(features, labels, mode, config):
                                training_chief_hooks=training_chief_hooks)

    return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
-
-
-
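For reference, here is a minimal usage sketch of the estimator after this change (not part of the diff). The feature names, column definitions, and the deepctr.estimator import path are assumptions for illustration; the key point is that the embedding dimension now comes from the feature columns rather than from an embedding_size argument.

# Minimal usage sketch, assuming DeepFEFMEstimator is exported from deepctr.estimator;
# the feature names and column definitions below are hypothetical.
import tensorflow as tf
from deepctr.estimator import DeepFEFMEstimator

# One sparse and one dense feature as tf.feature_column objects.
c1 = tf.feature_column.categorical_column_with_identity('C1', num_buckets=100)
i1 = tf.feature_column.numeric_column('I1')

linear_feature_columns = [c1, i1]
# The embedding dimension is set on the feature column itself, not on the estimator.
dnn_feature_columns = [tf.feature_column.embedding_column(c1, dimension=4), i1]

model = DeepFEFMEstimator(linear_feature_columns, dnn_feature_columns,
                          dnn_hidden_units=(128, 128), dnn_dropout=0.0, task='binary')
# model.train(input_fn) and model.evaluate(input_fn) then follow the usual tf.estimator workflow.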