@@ -15,13 +15,12 @@
 from ..layers.utils import concat_func
 
 
-def PNN(dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
-        init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', use_inner=True, use_outter=False,
-        kernel_type='mat', task='binary'):
+def PNN(dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001,
+        seed=1024, dnn_dropout=0, dnn_activation='relu', use_inner=True, use_outter=False, kernel_type='mat',
+        task='binary'):
     """Instantiates the Product-based Neural Network architecture.
 
     :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
-    :param embedding_size: positive integer,sparse feature embedding_size
     :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
     :param l2_reg_embedding: float . L2 regularizer strength applied to embedding vector
     :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
@@ -51,7 +50,7 @@ def PNN(dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
 
     # ipnn deep input
     linear_signal = tf.keras.layers.Reshape(
-        [len(sparse_embedding_list) * embedding_size])(concat_func(sparse_embedding_list))
+        [sum(map(lambda x: int(x.shape[-1]), sparse_embedding_list))])(concat_func(sparse_embedding_list))
 
     if use_inner and use_outter:
         deep_input = tf.keras.layers.Concatenate()(
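
Caller-side, this presumably pairs with moving the embedding width into the feature-column definitions. A hedged sketch of how a call might look after this change — the `SparseFeat` import path and its `embedding_dim` field are assumptions about the surrounding library version, not shown in this diff:

from deepctr.models import PNN
from deepctr.inputs import SparseFeat  # import location assumed; not part of this diff

# Each feature can now carry its own embedding width instead of one global embedding_size.
feature_columns = [
    SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),  # hypothetical feature
    SparseFeat('item_id', vocabulary_size=5000, embedding_dim=4),  # hypothetical feature
]

model = PNN(feature_columns, dnn_hidden_units=(128, 128), use_inner=True, task='binary')
model.compile(optimizer='adam', loss='binary_crossentropy')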