
Commit a523626

Refine doc for smooth l1 loss op.
1 parent 4ecbab4 commit a523626

File tree

1 file changed (+36, -26 lines)

paddle/operators/smooth_l1_loss_op.cc

Lines changed: 36 additions & 26 deletions
@@ -22,22 +22,20 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same.");
+    PADDLE_ENFORCE_EQ(x_dims, y_dims);
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The tensor rank of X must be at least 2.");
+                      "The tensor rank of Input(X) should not be less than 2.");
     if (ctx->HasInput("InsideWeight")) {
       PADDLE_ENFORCE(ctx->HasInput("OutsideWeight"),
                      "If weights are provided, must specify both "
                      "inside and outside weights.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("InsideWeight"), x_dims,
-                        "The shape of InsideWeight must be same as X.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("OutsideWeight"), x_dims,
-                        "The shape of OutsideWeight must be same as X.");
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("InsideWeight"), x_dims);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("OutsideWeight"), x_dims);
     }
 
     ctx->SetOutputDim("Diff", x_dims);
@@ -53,41 +51,53 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker {
                       framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X",
-             "The input tensor of smooth l1 loss op."
-             "The rank should be greater or equal to 2 with shape "
-             "[batch_size, value_dim1, value_dim2, ..., value_dimN]");
+             "(Tensor, default Tensor<float>) A tensor with rank at least 2. "
+             "The input value of smooth l1 loss op with shape "
+             "[batch_size, dim1, ..., dimN].");
     AddInput("Y",
-             "The target tensor of smooth l1 loss op "
-             "with the same shape as X.");
+             "(Tensor, default Tensor<float>) A tensor with rank at least 2. "
+             "The target value of smooth l1 loss op with same shape as X.");
     AddInput("InsideWeight",
-             "Optional input tensor of smooth l1 loss op with the same shape "
-             "as X. If provided, the result of (X - Y) will be multiplied "
+             "(Tensor, default Tensor<float>) A tensor with rank at least 2. "
+             "This input is optional and should have same shape with X. "
+             "If provided, the result of (X - Y) will be multiplied "
              "by this tensor element by element.")
         .AsDispensable();
     AddInput("OutsideWeight",
-             "Optinal input of smooth l1 loss op with the same shape as X."
-             "If provided, the output smooth l1 loss will be multiplied by "
-             "this tensor element by element.")
+             "(Tensor, default Tensor<float>) A tensor with rank at least 2. "
+             "This input is optional and should have same shape with X. "
+             "If provided, the out smooth l1 loss will be multiplied by this "
+             "tensor element by element.")
         .AsDispensable();
-    AddOutput("Diff", "Intermediate variable to cache InsideWeight*(X-Y).")
+    AddOutput("Diff", "Intermediate variable to cache InsideWeight * (X - Y).")
         .AsIntermediate();
-    AddOutput("Out", "Smooth l1 loss.");
+    AddOutput("Out",
+              "(Tensor, default Tensor<float>) A tensor with rank be 2. "
+              "The output smooth l1 loss with shape [batch_size, 1].");
     AddAttr<AttrType>("sigma",
                       "Hyper parameter of smooth l1 loss op."
                       "A float scalar with default value 3.0.")
         .SetDefault(3.0);
     AddComment(R"DOC(
 Smooth L1 Loss Operator.
 
-This operator computes the smooth l1 loss for input and target.
-The operator takes the first dimension of input as the batch size.
+This operator computes the smooth l1 loss for X and Y.
+The operator takes the first dimension of X and Y as batch size.
 For each instance, it computes the smooth l1 loss element by element first
-and then sums all the losses. So the resulting output shape
-is [batch_size, 1].
+and then sums all the losses. So the shape of Out is [batch_size, 1].
 
 The equation is:
-loss = $$0.5 * (\sigma * (x-y))^2$$ if $$|x - y| < 1 /({\sigma}^2)$$
-$$\frac{|x - y| - 0.5}{{\sigma}^2}$$ otherwise
+$$
+Out_{\sigma}(X, Y)_i = \begin{cases}
+0.5 * (\sigma * (X_i - Y_i)) ^ 2
+\quad |X_i - Y_i| \lt \frac{1} {{\sigma} ^ 2} \\
+\frac{|X_i - Y_i| - 0.5}{{\sigma}^2},
+\quad otherwise
+\end{cases}
+$$
+
+In the above equation, $Out_{\sigma}(X, Y)_i$, $X_i$ and $Y_i$ represent the ith
+element of Out, X and Y.
 
 )DOC");
   }

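Note: to make the refined DOC block concrete, below is a minimal standalone C++ sketch, not Paddle code and not the operator's actual CPU/GPU kernel, of the documented semantics: Diff caches InsideWeight * (X - Y), each element of Diff goes through the piecewise equation with the "sigma" attribute (default 3.0), the element-wise loss is scaled by OutsideWeight, and the per-instance sum gives Out with shape [batch_size, 1]. The helper name SmoothL1Term, the toy shapes, and the sample values are made up for illustration; the two branches are transcribed literally from the equation in the DOC comment, reading X_i - Y_i as the inside-weighted difference when InsideWeight is given.

#include <cmath>
#include <cstdio>
#include <vector>

// Piecewise term, transcribed from the equation in the DOC comment above.
double SmoothL1Term(double d, double sigma) {
  const double sigma2 = sigma * sigma;
  const double abs_d = std::fabs(d);
  if (abs_d < 1.0 / sigma2) {
    return 0.5 * (sigma * d) * (sigma * d);
  }
  return (abs_d - 0.5) / sigma2;
}

int main() {
  // Toy shapes: batch_size = 2, dim1 = 3, so X and Y are [2, 3].
  const int batch_size = 2, dim = 3;
  std::vector<double> x = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
  std::vector<double> y = {1.1, 1.0, 3.0, 0.0, 5.5, 6.0};
  // When InsideWeight/OutsideWeight are not provided they act as all-ones.
  std::vector<double> inside_w(batch_size * dim, 1.0);
  std::vector<double> outside_w(batch_size * dim, 1.0);
  const double sigma = 3.0;  // default of the "sigma" attribute

  std::vector<double> diff(batch_size * dim);  // mirrors the "Diff" output
  std::vector<double> out(batch_size, 0.0);    // mirrors "Out", shape [batch_size, 1]
  for (int b = 0; b < batch_size; ++b) {
    for (int j = 0; j < dim; ++j) {
      const int i = b * dim + j;
      diff[i] = inside_w[i] * (x[i] - y[i]);  // Diff = InsideWeight * (X - Y)
      // Scale the element-wise loss by OutsideWeight, then sum per instance.
      out[b] += outside_w[i] * SmoothL1Term(diff[i], sigma);
    }
  }

  for (int b = 0; b < batch_size; ++b) {
    std::printf("Out[%d] = %f\n", b, out[b]);
  }
  return 0;
}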