Commit ff8bc57

Merge pull request #694 from wangzhen38/bug_fix_del
del useless annotation
2 parents 3b416ac + b034b9d

1 file changed (+9, -55 lines)


models/rank/dcn/net.py

Lines changed: 9 additions & 55 deletions
@@ -33,7 +33,6 @@ def __init__(self, sparse_feature_number, sparse_feature_dim,
         self.l2_reg_cross = l2_reg_cross
         self.is_sparse = is_sparse
 
-        # self.dense_emb_dim = self.sparse_feature_dim
         self.init_value_ = 0.1
 
         # sparse coding
@@ -47,17 +46,6 @@ def __init__(self, sparse_feature_number, sparse_feature_dim,
                     mean=0.0,
                     std=self.init_value_ /
                     math.sqrt(float(self.sparse_feature_dim)))))
-        # print("------self.sparse_embedding-------", self.embedding)
-
-        # # dense coding
-        # self.dense_w = paddle.create_parameter(
-        #     shape=[1, self.dense_feature_dim, self.dense_emb_dim],
-        #     dtype='float32',
-        #     default_initializer=paddle.nn.initializer.TruncatedNormal(
-        #         mean=0.0,
-        #         std=self.init_value_ /
-        #         math.sqrt(float(self.sparse_feature_dim))))
-        # # print("------self.dense_w-----", self.dense_w) #shape=[1, 13, 9]
 
         # w
         self.layer_w = paddle.create_parameter(
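Note: the block deleted in this hunk was a commented-out "dense coding" parameter (self.dense_w) that the model never used; only the sparse embedding table survives. As a minimal, self-contained sketch of the initialization pattern the surviving embedding uses (the vocabulary size and width below are hypothetical, not taken from the repo config):

import math
import paddle

sparse_feature_number = 1000001  # hypothetical vocabulary size
sparse_feature_dim = 9           # hypothetical embedding width
init_value = 0.1

# TruncatedNormal init whose std shrinks with the embedding width,
# matching the mean/std context lines in the hunk above.
embedding = paddle.nn.Embedding(
    sparse_feature_number,
    sparse_feature_dim,
    sparse=True,
    weight_attr=paddle.ParamAttr(
        initializer=paddle.nn.initializer.TruncatedNormal(
            mean=0.0,
            std=init_value / math.sqrt(float(sparse_feature_dim)))))

ids = paddle.randint(0, sparse_feature_number, [2, 26])
print(embedding(ids).shape)  # [2, 26, 9]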
@@ -70,7 +58,6 @@ def __init__(self, sparse_feature_number, sparse_feature_dim,
                 mean=0.0,
                 std=self.init_value_ /
                 math.sqrt(float(self.sparse_feature_dim))))
-        # print("----self.layer_w", self.layer_w) #shape=[1000014]
 
         # b
         self.layer_b = paddle.create_parameter(
@@ -83,14 +70,13 @@ def __init__(self, sparse_feature_number, sparse_feature_dim,
                 mean=0.0,
                 std=self.init_value_ /
                 math.sqrt(float(self.sparse_feature_dim))))
-        # print("----self.layer_b", self.layer_b) #shape=[1000014]
 
         # DNN
         self.num_field = self.dense_feature_dim + self.sparse_num_field * self.sparse_feature_dim
         sizes = [self.num_field] + self.layer_sizes
         acts = ["relu" for _ in range(len(self.layer_sizes))] + [None]
         self._mlp_layers = []
-        for i in range(len(self.layer_sizes)): # + 1):
+        for i in range(len(self.layer_sizes)):
             linear = paddle.nn.Linear(
                 in_features=sizes[i],
                 out_features=sizes[i + 1],
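Note: the only change in this hunk besides print removal is dropping the stale "# + 1):" remnant from the loop header; the loop builds one Linear per entry in layer_sizes, and the final 1-unit output layer (self.fc) is created separately. A sketch of the same stacking pattern, with hypothetical hidden widths (the 247 comes from the shape comments deleted elsewhere in this commit: 13 dense features plus 26 sparse fields times 9 embedding dims):

import paddle

num_field = 247                # 13 + 26 * 9, per the deleted shape comments
layer_sizes = [512, 256, 128]  # hypothetical hidden widths

sizes = [num_field] + layer_sizes
layers = []
for i in range(len(layer_sizes)):
    # One Linear per hidden width, each followed by ReLU as in `acts`.
    layers.append(paddle.nn.Linear(in_features=sizes[i],
                                   out_features=sizes[i + 1]))
    layers.append(paddle.nn.ReLU())

mlp = paddle.nn.Sequential(*layers)
print(mlp(paddle.randn([2, num_field])).shape)  # [2, 128]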
@@ -114,52 +100,28 @@ def __init__(self, sparse_feature_number, sparse_feature_dim,
                     self.dense_feature_dim))))
 
     def _create_embedding_input(self, sparse_inputs, dense_inputs):
-        # print("-----sparse_inputs-1-----",sparse_inputs)
-        sparse_inputs_concat = paddle.concat(
-            sparse_inputs, axis=1) #Tensor(shape=[2, 26])
-        # print("----sparse_inputs_concat-----", sparse_inputs_concat) # shape(-1, 26)
-        sparse_embeddings = self.embedding(
-            sparse_inputs_concat) # shape=[2, 26, 9]
-        # print("----sparse_embeddings-----", sparse_embeddings) #shape(-1, 26, 9)
+        sparse_inputs_concat = paddle.concat(sparse_inputs, axis=1)
+        sparse_embeddings = self.embedding(sparse_inputs_concat)
         sparse_embeddings_re = paddle.reshape(
             sparse_embeddings,
-            shape=[-1, self.sparse_num_field *
-                   self.sparse_feature_dim]) # paddle.reshape(x, shape
-        # print("----sparse_embeddings_re----", sparse_embeddings_re) # shape(-1, 234)
+            shape=[-1, self.sparse_num_field * self.sparse_feature_dim])
         feat_embeddings = paddle.concat([sparse_embeddings_re, dense_inputs],
                                         1)
-        # print("----feat_embeddings----", feat_embeddings) #shape(-1, 247)
         return feat_embeddings
 
     def _cross_layer(self, input_0, input_x):
-        # print("-----input_0---", input_0) # Tensor(shape=[2, 247])
-        # print("-----input_x---", input_x) # Tensor(shape=[2, 247])
-        # print("-----self.layer_w---", self.layer_w) # #[247]
-        input_w = paddle.multiply(input_x, self.layer_w) #shape=[2, 247]
-        # print("-----input_w----", input_w)
-        input_w1 = paddle.sum(input_w, axis=1, keepdim=True) # shape=[2, 1]
-        # print("-----input_w1----", input_w1)
+        input_w = paddle.multiply(input_x, self.layer_w)
+        input_w1 = paddle.sum(input_w, axis=1, keepdim=True)
 
-        # input_w = paddle.matmul(input_0, self.layer_w) #shape=[2]
-        # print("-----input_w----", input_w)
-        # input_ww0 = paddle.matmul(input_w, input_0)
-        # print("-----input_ww0----", input_ww0)
-
-        input_ww = paddle.multiply(input_0, input_w1) # shape=[2, 247]
-        # print("-----input_ww----", input_ww)
-
-        # input_ww_0 = paddle.sum(input_ww,dim=1, keep_dim=True)
-        # print("-----input_ww0----", input_ww0)
+        input_ww = paddle.multiply(input_0, input_w1)
 
         input_layer_0 = paddle.add(input_ww, self.layer_b)
         input_layer = paddle.add(input_layer_0, input_x)
-        # print("-----input_layer----", input_layer)
 
         return input_layer, input_w
 
     def _cross_net(self, input, num_corss_layers):
         x = x0 = input
-        # print("----cross--input----", input)
         l2_reg_cross_list = []
         for i in range(num_corss_layers):
             x, w = self._cross_layer(x0, x)
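Note: with the debug prints gone, _cross_layer reads as the standard DCN cross interaction, x_{l+1} = x_0 * (w . x_l) + b + x_l, where the dot product w . x_l collapses to one scalar per sample. A standalone sketch of one such layer, with dimensions that are hypothetical but follow the deleted shape comments:

import paddle

d = 247                    # feature width: 13 dense + 26 * 9 sparse dims
x0 = paddle.randn([2, d])  # cross-network input, kept fixed across layers
x = paddle.randn([2, d])   # output of the previous cross layer
w = paddle.randn([d])
b = paddle.randn([d])

# x_{l+1} = x_0 * <w, x_l> + b + x_l, matching the cleaned-up method.
xw = paddle.sum(paddle.multiply(x, w), axis=1, keepdim=True)  # [2, 1]
x_next = paddle.add(paddle.add(paddle.multiply(x0, xw), b), x)
print(x_next.shape)  # [2, 247]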
@@ -173,28 +135,20 @@ def _l2_loss(self, w):
         return paddle.sum(paddle.square(w))
 
     def forward(self, sparse_inputs, dense_inputs):
-        # print("-----sparse_inputs", sparse_inputs)
-        # print("-----dense_inputs", dense_inputs)
-        feat_embeddings = self._create_embedding_input(
-            sparse_inputs, dense_inputs) # shape=[2, 247]
-        # print("----feat_embeddings-----", feat_embeddings)
+        feat_embeddings = self._create_embedding_input(sparse_inputs,
+                                                       dense_inputs)
         cross_out, l2_reg_cross_loss = self._cross_net(feat_embeddings,
                                                        self.cross_num)
 
         dnn_feat = feat_embeddings
-        # print("----dnn_feat---",dnn_feat)
 
         for n_layer in self._mlp_layers:
             dnn_feat = n_layer(dnn_feat)
-        # print("----dnn_feat---",dnn_feat)
 
         last_out = paddle.concat([dnn_feat, cross_out], axis=-1)
-        # print("----last_out---",last_out)
 
         logit = self.fc(last_out)
-        # print("----logit---",logit)
 
         predict = F.sigmoid(logit)
-        # print("----predict---",predict)
 
         return predict, l2_reg_cross_loss
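Note: the trimmed forward keeps the two-tower wiring: the same feat_embeddings feed both the cross network and the MLP, the tower outputs are concatenated, and a final Linear plus sigmoid produces the click probability. A minimal sketch of that tail end, with hypothetical widths:

import paddle
import paddle.nn.functional as F

dnn_feat = paddle.randn([2, 128])   # hypothetical last MLP width
cross_out = paddle.randn([2, 247])  # cross tower keeps the input width

fc = paddle.nn.Linear(in_features=128 + 247, out_features=1)

last_out = paddle.concat([dnn_feat, cross_out], axis=-1)
logit = fc(last_out)
predict = F.sigmoid(logit)
print(predict.shape)  # [2, 1]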

0 commit comments
