Skip to content

Commit e56fd43

Browse files
committed
fix statement. test=develop
1 parent 0c8351e commit e56fd43

File tree

2 files changed

+14
-12
lines changed

2 files changed

+14
-12
lines changed

paddle/fluid/API.spec

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func',
220220
paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1546136806fef5c08f6918544bd9151d'))
221221
paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', '2f6ff96864054a31aa4bb659c6722c99'))
222222
paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '431a4301c35032166ec029f7432c80a7'))
223-
paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '74112f07e2329448f9f583cabd9d681e'))
223+
paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '776d536cac47c89073abc7ee524d5aec'))
224224
paddle.fluid.layers.tree_conv (ArgSpec(args=['nodes_vector', 'edge_set', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None)), ('document', '34ea12ac9f10a65dccbc50100d12e607'))
225225
paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '33bbd42027d872b3818b3d64ec52e139'))
226226
paddle.fluid.layers.open_files (ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)), ('document', 'b1ae2e1cc0750e58726374061ea90ecc'))

paddle/fluid/operators/kldiv_loss_op.cc

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -65,11 +65,11 @@ class KLDivLossOpMaker : public framework::OpProtoAndCheckerMaker {
6565
public:
6666
void Make() override {
6767
AddInput("X",
68-
"The input tensor of KL divergence loss operator, "
69-
"This is a tensor with shape of [N, *], where N is the"
68+
"The input tensor of KL divergence loss operator. "
69+
"This is a tensor with shape of [N, *], where N is the "
7070
"batch size, * means any number of additional dimensions.");
7171
AddInput("Target",
72-
"The tensor of KL divergence loss operator, "
72+
"The tensor of KL divergence loss operator. "
7373
"This is a tensor with shape of Input(X).");
7474
AddOutput(
7575
"Loss",
@@ -82,29 +82,31 @@ class KLDivLossOpMaker : public framework::OpProtoAndCheckerMaker {
8282
"The reduction type to apply to the output, available types "
8383
"are 'none' | 'batchmean' | 'mean' | 'sum', 'none' for no "
8484
"reduction, 'batchmean' for the sum of output divided by "
85-
"batch size, 'mean' for the average valud of all output, "
85+
"batch size, 'mean' for the average value of all output, "
8686
"'sum' for the sum of the output.")
8787
.SetDefault("mean");
8888

8989
AddComment(R"DOC(
9090
This operator calculates the Kullback-Leibler divergence loss
9191
between Input(X) and Input(Target).
9292
93-
KL divergence loss calculates as follows:
93+
KL divergence loss is calculated as follows:
9494
95-
$$l(x, y) = y * (\log y - x)$$
95+
$$l(x, y) = y * (\log(y) - x)$$
96+
97+
Where :math:`x` is Input(X) and :math:`y` is Input(Target).
9698
9799
While :attr:`reduction` is :attr:`none`, output loss is in
98-
same shape with Input(X), loss in each point is calculated
99-
seperately and no reduction applied.
100+
the same shape as Input(X), loss in each point is calculated
101+
separately and no reduction is applied.
100102
101-
While :attr:`reduction` is :attr:`mean`, output loss in in
103+
While :attr:`reduction` is :attr:`mean`, output loss is in
102104
shape of [1] and loss value is the mean value of all losses.
103105
104-
While :attr:`reduction` is :attr:`sum`, output loss in in
106+
While :attr:`reduction` is :attr:`sum`, output loss is in
105107
shape of [1] and loss value is the sum value of all losses.
106108
107-
While :attr:`reduction` is :attr:`batchmean`, output loss in
109+
While :attr:`reduction` is :attr:`batchmean`, output loss is
108110
in shape of [1] and loss value is the sum value of all losses
109111
divided by batch size.
110112

0 commit comments

Comments
 (0)