We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 332112a · commit fa1e0ba — Copy full SHA for fa1e0ba
examples/text_generation/couplet/model.py
@@ -23,8 +23,8 @@ def __init__(self):
23
super(CrossEntropyCriterion, self).__init__()
24
25
def forward(self, predict, label, trg_mask):
26
- cost = F.softmax_with_cross_entropy(
27
- logits=predict, label=label, soft_label=False)
+ cost = F.cross_entropy(
+ input=predict, label=label, reduction='none', soft_label=False)
28
cost = paddle.squeeze(cost, axis=[2])
29
masked_cost = cost * trg_mask
30
batch_mean_cost = paddle.mean(masked_cost, axis=[0])
examples/text_generation/couplet/train.py
@@ -14,10 +14,7 @@
14
15
from args import parse_args
16
17
-import numpy as np
18
import paddle
19
-import paddle.nn as nn
20
-import paddle.nn.functional as F
21
from paddlenlp.metrics import Perplexity
22
from data import create_train_loader
examples/text_generation/vae-seq2seq/model.py
@@ -40,8 +40,8 @@ def forward(self, kl_loss, dec_output, trg_mask, label):
40
self.update_kl_weight()
41
self.kl_loss = kl_loss
42
43
- rec_loss = F.softmax_with_cross_entropy(
44
- logits=dec_output, label=label, soft_label=False)
+ rec_loss = F.cross_entropy(
+ input=dec_output, label=label, reduction='none', soft_label=False)
45
46
rec_loss = paddle.squeeze(rec_loss, axis=[2])
47
rec_loss = rec_loss * trg_mask
@@ -117,7 +117,9 @@ def __init__(self, ppl, nll, log_freq=200, verbose=2):
117
118
def on_train_begin(self, logs=None):
119
super(TrainCallback, self).on_train_begin(logs)
120
- self.train_metrics = ["loss", "ppl", "nll", "kl weight", "kl loss", "rec loss"]
+ self.train_metrics = [
121
+ "loss", "ppl", "nll", "kl weight", "kl loss", "rec loss"
122
+ ]
123
124
def on_epoch_begin(self, epoch=None, logs=None):
125
super(TrainCallback, self).on_epoch_begin(epoch, logs)
0 commit comments