Commit 9bc3dcf

recover dnn
1 parent 5fabe3c commit 9bc3dcf

File tree

1 file changed: +41 −28 lines changed


models/rank/dnn/dygraph_model.py

Lines changed: 41 additions & 28 deletions
@@ -23,26 +23,35 @@
 class DygraphModel():
     # define model
     def create_model(self, config):
-        article_content_size = config.get("hyper_parameters.article_content_size")
-        article_title_size = config.get("hyper_parameters.article_title_size")
-        browse_size = config.get("hyper_parameters.browse_size")
-        neg_condidate_sample_size = config.get("hyper_parameters.neg_condidate_sample_size")
-        word_dimension = config.get("hyper_parameters.word_dimension")
-        category_size = config.get("hyper_parameters.category_size")
-        sub_category_size = config.get("hyper_parameters.sub_category_size")
-        cate_dimension = config.get("hyper_parameters.category_dimension")
-        word_dict_size = config.get("hyper_parameters.word_dict_size")
-        return net.NAMLLayer(config, article_content_size, article_title_size, browse_size, neg_condidate_sample_size,
-                             word_dimension, category_size, sub_category_size, cate_dimension, word_dict_size)
+        sparse_feature_number = config.get(
+            "hyper_parameters.sparse_feature_number")
+        sparse_feature_dim = config.get("hyper_parameters.sparse_feature_dim")
+        fc_sizes = config.get("hyper_parameters.fc_sizes")
+        sparse_fea_num = config.get('hyper_parameters.sparse_fea_num')
+        dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
+        sparse_input_slot = config.get('hyper_parameters.sparse_inputs_slots')
+
+        dnn_model = net.DNNLayer(sparse_feature_number, sparse_feature_dim,
+                                 dense_feature_dim, sparse_input_slot - 1,
+                                 fc_sizes)
+        return dnn_model

     # define feeds which convert numpy of batch data to paddle.tensor
-    def create_feeds(self, batch, config):
-        label = batch[0]
-        return label, batch[1:], None
+    def create_feeds(self, batch_data, config):
+        dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
+        sparse_tensor = []
+        for b in batch_data[:-1]:
+            sparse_tensor.append(
+                paddle.to_tensor(b.numpy().astype('int64').reshape(-1, 1)))
+        dense_tensor = paddle.to_tensor(batch_data[-1].numpy().astype(
+            'float32').reshape(-1, dense_feature_dim))
+        label = sparse_tensor[0]
+        return label, sparse_tensor[1:], dense_tensor

     # define loss function by predicts and label
-    def create_loss(self, raw_pred, label):
-        cost = paddle.nn.functional.cross_entropy(input=raw_pred, label=paddle.cast(label, "float32"), soft_label=True)
+    def create_loss(self, raw_predict_2d, label):
+        cost = paddle.nn.functional.cross_entropy(
+            input=raw_predict_2d, label=label)
         avg_cost = paddle.mean(x=cost)
         return avg_cost

@@ -56,28 +65,32 @@ def create_optimizer(self, dy_model, config):
     # define metrics such as auc/acc
     # multi-task need to define multi metric
     def create_metrics(self):
-        metrics_list_name = ["acc"]
-        auc_metric = paddle.metric.Accuracy()
+        metrics_list_name = ["auc"]
+        auc_metric = paddle.metric.Auc("ROC")
         metrics_list = [auc_metric]
         return metrics_list, metrics_list_name

     # construct train forward phase
     def train_forward(self, dy_model, metrics_list, batch_data, config):
-        labels, sparse_tensor, dense_tensor = self.create_feeds(batch_data,config)
+        label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
+                                                               config)

-        raw = dy_model(sparse_tensor, None)
+        raw_pred_2d = dy_model.forward(sparse_tensor, dense_tensor)
+        loss = self.create_loss(raw_pred_2d, label)
+        # update metrics
+        predict_2d = paddle.nn.functional.softmax(raw_pred_2d)
+        metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())

-        loss = paddle.nn.functional.cross_entropy(input=raw, label=paddle.cast(labels, "float32"), soft_label=True)
-        correct = metrics_list[0].compute(raw, labels)
-        metrics_list[0].update(correct)
-        loss = paddle.mean(loss)
+        # print_dict format :{'loss': loss}
         print_dict = None
         return loss, metrics_list, print_dict

     def infer_forward(self, dy_model, metrics_list, batch_data, config):
         label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
                                                                config)
-        raw = dy_model(sparse_tensor, None)
-        correct = metrics_list[0].compute(raw, label)
-        metrics_list[0].update(correct)
-        return metrics_list, None
+
+        raw_pred_2d = dy_model.forward(sparse_tensor, dense_tensor)
+        # update metrics
+        predict_2d = paddle.nn.functional.softmax(raw_pred_2d)
+        metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
+        return metrics_list, None
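
For reference, below is a minimal sketch of the batch layout the new create_feeds() expects and the shapes it returns. It is not part of the commit: the slot counts (one label slot plus 26 sparse id slots), dense_input_dim = 13, the fabricated random batch, and the use of plain numpy arrays in place of the dataloader's tensors are assumptions for illustration; the real values come from the model's hyper_parameters YAML, and net.DNNLayer itself is defined elsewhere in the repo.

# Minimal sketch (not part of the commit): reproduces the create_feeds()
# reshaping on a fabricated batch so the resulting tensor shapes are visible.
# Assumed values: dense_input_dim = 13 and sparse_inputs_slots = 27
# (1 label slot + 26 sparse id slots), as in a Criteo-style config.
import numpy as np
import paddle

dense_input_dim = 13
sparse_inputs_slots = 27
batch_size = 4

# Fabricated batch in dataloader order: [label, sparse slot 1..26, dense block]
batch_data = [np.random.randint(0, 2, size=(batch_size, 1)).astype('int64')]
batch_data += [
    np.random.randint(0, 1000, size=(batch_size, 1)).astype('int64')
    for _ in range(sparse_inputs_slots - 1)
]
batch_data.append(
    np.random.rand(batch_size, dense_input_dim).astype('float32'))

# Same logic as the committed create_feeds(), with numpy arrays standing in
# for the dataloader's tensors (hence no .numpy() call here).
sparse_tensor = [
    paddle.to_tensor(b.astype('int64').reshape(-1, 1)) for b in batch_data[:-1]
]
dense_tensor = paddle.to_tensor(
    batch_data[-1].astype('float32').reshape(-1, dense_input_dim))
label = sparse_tensor[0]

print(label.shape)             # [4, 1]  -> labels for cross_entropy / Auc
print(len(sparse_tensor[1:]))  # 26      -> sparse_input_slot - 1 slots for DNNLayer
print(dense_tensor.shape)      # [4, 13] -> dense features for DNNLayer

These shapes also explain the other changes in the diff: labels stay integer class ids, so cross_entropy no longer needs soft_label=True, and paddle.metric.Auc consumes the softmax probabilities together with those ids.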
