Skip to content

Commit b0b26da

Browse files
abhinavarora
authored and wangkuiyi committed
Polish operator documentation (#5356)
* Polish the documentation for uniform_random and top_k ops * Polishing more operators
1 parent 7408a4c commit b0b26da

File tree

9 files changed

+113
-87
lines changed

9 files changed

+113
-87
lines changed

paddle/operators/save_op.cc

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -163,14 +163,19 @@ class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker {
163163
SaveOpProtoMaker(framework::OpProto *proto,
164164
framework::OpAttrChecker *op_checker)
165165
: OpProtoAndCheckerMaker(proto, op_checker) {
166-
AddInput("X", "The tensor need to be saved");
167-
AddComment(R"DOC(Save operator
168-
Save operator will serialize and write a tensor variable to disk file.
166+
AddInput("X", "(Tensor) Input tensor to be saved");
167+
AddComment(R"DOC(
168+
Save operator
169+
170+
This operator will serialize and write a tensor variable to file on disk.
169171
)DOC");
170-
AddAttr<bool>("overwrite", "Overwrite the output file if exist")
172+
AddAttr<bool>("overwrite",
173+
"(boolean, default true)"
174+
"Overwrite the output file if it exists")
171175
.SetDefault(true);
172176
AddAttr<std::string>("file_path",
173-
"Variable will be saved to \"file_path\".")
177+
"(string)"
178+
"The \"file_path\" where the variable will be saved.")
174179
.AddCustomChecker(
175180
[](const std::string &path) { return !path.empty(); });
176181
}

paddle/operators/scale_op.cc

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -40,13 +40,16 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
4040
public:
4141
ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
4242
: OpProtoAndCheckerMaker(proto, op_checker) {
43-
AddInput("X", "The input tensor of scale operator.");
44-
AddOutput("Out", "The output tensor of scale operator.");
45-
AddComment(R"DOC(Scale operator
43+
AddInput("X", "(Tensor) Input tensor of scale operator.");
44+
AddOutput("Out", "(Tensor) Output tensor of scale operator.");
45+
AddComment(R"DOC(
46+
Scale operator
4647
47-
The equation is: Out = scale*X
48+
$$Out = scale*X$$
4849
)DOC");
49-
AddAttr<AttrType>("scale", "The scaling factor of the scale operator.")
50+
AddAttr<AttrType>("scale",
51+
"(float, default 0)"
52+
"The scaling factor of the scale operator.")
5053
.SetDefault(1.0);
5154
}
5255
};

paddle/operators/sequence_concat_op.cc

Lines changed: 35 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -47,19 +47,19 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
4747
framework::OpAttrChecker* op_checker)
4848
: OpProtoAndCheckerMaker(proto, op_checker) {
4949
AddInput("X",
50-
"(A vector of LoDTensor), the input is a vector of LoDTensor, "
50+
"(vector<LoDTensor>) Input is a vector of LoDTensor, "
5151
"each of which is a variable-length sequence or nested sequence.")
5252
.AsDuplicable();
5353
AddOutput("Out",
54-
"(A LoDTensor), the variable-length output of "
54+
"(LoDTensor), Variable-length output of "
5555
"sequence_concat Op.");
5656
AddAttr<int>("axis",
57-
"(int, default 0)"
58-
"The axis which the inputs will be joined with. "
57+
"(int, default 0) "
58+
"The axis along which the inputs will be joined. "
5959
"If axis is 0, the inputs will be joined with LoD index.")
6060
.SetDefault(0);
6161
AddAttr<int>("level",
62-
"(int, default 0)"
62+
"(int, default 0) "
6363
"The level at which the inputs will be joined. "
6464
"If the level is 0, the inputs will be joined at the nested "
6565
"sequence level. "
@@ -68,34 +68,36 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
6868
"The level should be less than the level number of inputs.")
6969
.SetDefault(0);
7070
AddComment(R"DOC(
71-
The sequence_concat operator concatenates multiple LoDTensors.
72-
It only supports sequence (LoD Tensor with level number is 1)
73-
or a nested sequence (LoD tensor with level number is 2) as its input.
74-
- Case1:
75-
If the axis is other than 0(here, axis is 1 and level is 1),
76-
each input should have the same LoD information and the LoD
77-
information of the output keeps the same as the input.
78-
79-
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
80-
LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
81-
LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)
82-
83-
- Case2:
84-
If the axis is 0(here, leve is 0), the inputs are concatenated along
85-
time steps, the LoD information of the output need to re-compute.
86-
87-
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
88-
LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
89-
LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
90-
91-
- Case3:
92-
If the axis is 0(here, level is 1).
93-
94-
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
95-
LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
96-
LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
97-
98-
NOTE: The levels of all the inputs should be the same.
71+
Sequence Concat operator
72+
73+
The sequence_concat operator concatenates multiple LoDTensors.
74+
It only supports sequence (LoD Tensor with level number is 1)
75+
or a nested sequence (LoD tensor with level number is 2) as its input.
76+
- Case1:
77+
If the axis is other than 0(here, axis is 1 and level is 1),
78+
each input should have the same LoD information and the LoD
79+
information of the output keeps the same as the input.
80+
81+
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
82+
LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
83+
LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)
84+
85+
- Case2:
86+
If the axis is 0 (here, level is 0), the inputs are concatenated along
87+
time steps, and the LoD information of the output needs to be re-computed.
88+
89+
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
90+
LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
91+
LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
92+
93+
- Case3:
94+
If the axis is 0(here, level is 1).
95+
96+
LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
97+
LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
98+
LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
99+
100+
NOTE: The levels of all the inputs should be the same.
99101
)DOC");
100102
}
101103
};

paddle/operators/sgd_op.cc

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,15 +45,17 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
4545
public:
4646
SGDOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
4747
: OpProtoAndCheckerMaker(proto, op_checker) {
48-
AddInput("Param", "Input parameter");
49-
AddInput("LearningRate", "Learning rate of SGD");
50-
AddInput("Grad", "Input gradient");
51-
AddOutput("ParamOut", "output parameter");
48+
AddInput("Param", "(Tensor) Input parameter");
49+
AddInput("LearningRate", "(Tensor) Learning rate of SGD");
50+
AddInput("Grad", "(Tensor) Input gradient");
51+
AddOutput("ParamOut", "(Tensor) Output parameter");
5252
AddComment(R"DOC(
5353
54-
Simplest sgd algorithm.
54+
SGD operator
5555
56-
param_out = param - learning_rate * grad;
56+
This operator implements one step of the stochastic gradient descent algorithm.
$$param\_out = param - learning\_rate * grad$$
57+
58+
5759
5860
)DOC");
5961
}

paddle/operators/sign_op.cc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,10 @@ class SignOpMaker : public framework::OpProtoAndCheckerMaker {
3838
: OpProtoAndCheckerMaker(proto, op_checker) {
3939
AddInput("X", "(Tensor) Input tensor of sign operator.");
4040
AddOutput("Out", "(Tensor) Output tensor of sign operator.");
41-
AddComment(R"DOC(Sign operator
41+
AddComment(R"DOC(
42+
Sign operator
4243
43-
The equation is: Out = X.sign()
44+
$$Out = X.sign()$$
4445
)DOC");
4546
}
4647
};

paddle/operators/split_op.cc

Lines changed: 24 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -67,30 +67,38 @@ class SplitOpMaker : public framework::OpProtoAndCheckerMaker {
6767
public:
6868
SplitOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
6969
: OpProtoAndCheckerMaker(proto, op_checker) {
70-
AddInput("X", "the input tensor of split operator.");
71-
AddOutput("Out", "the output tensors of split operator.").AsDuplicable();
70+
AddInput("X", "(Tensor) Input tensor of the split operator.");
71+
AddOutput("Out", "(Tensor) Output tensors of the split operator.")
72+
.AsDuplicable();
7273
AddComment(R"DOC(
73-
Split the input tensor into multiple sub-tensors.
74-
Example:
75-
Input = [[1,2],
76-
[3,4],
77-
[5,6]]
78-
sections = [2,1]
79-
axis = 0
80-
Output[0] = [[1,2],
81-
[3,4]]
82-
Output[1] = [[5,6]]
74+
Split operator
75+
76+
This operator splits the input tensor into multiple sub-tensors.
77+
78+
Example:
79+
Input = [[1,2],
80+
[3,4],
81+
[5,6]]
82+
sections = [2,1]
83+
axis = 0
84+
Output[0] = [[1,2],
85+
[3,4]]
86+
Output[1] = [[5,6]]
8387
8488
)DOC");
8589
AddAttr<std::vector<int>>("sections",
86-
"the length for each"
87-
"output along with the specify axis.")
90+
"(vector<int>) "
91+
"the length of each output along the "
92+
"specified axis.")
8893
.SetDefault(std::vector<int>{});
8994
AddAttr<int>("num",
90-
"number of the sub-tensors, it must evenly divide "
95+
"(int, default 0)"
96+
"Number of sub-tensors. This must evenly divide "
9197
"Input.dims()[axis]")
9298
.SetDefault(0);
93-
AddAttr<int>("axis", "The axis which the input will be splited on.")
99+
AddAttr<int>("axis",
100+
"(int, default 0) "
101+
"The axis along which the input will be split.")
94102
.SetDefault(0);
95103
}
96104
};

paddle/operators/squared_l2_distance_op.cc

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -59,23 +59,26 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
5959
SquaredL2DistanceOpMaker(framework::OpProto* proto,
6060
framework::OpAttrChecker* op_checker)
6161
: OpProtoAndCheckerMaker(proto, op_checker) {
62-
AddInput("X", "Input of SquaredL2DistanceOp.");
63-
AddInput("Y", "Target of SquaredL2DistanceOp.");
62+
AddInput("X", "(Tensor) Input of SquaredL2DistanceOp.");
63+
AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp.");
6464
AddOutput("sub_result",
65-
"Buffering substraction result which "
65+
"(Tensor) Buffering subtraction result which "
6666
"will be reused in backward.")
6767
.AsIntermediate();
68-
AddOutput("Out", "Squared l2 distance between input and target.");
68+
AddOutput("Out", "(Tensor) Squared l2 distance between input and target.");
6969
AddComment(R"DOC(
70-
SquaredL2DistanceOp will cacluate the squared L2 distance for
71-
input and target. Number of distance value equals to the
72-
first dimension of input. First dimension of target could be equal to
73-
input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp
74-
will broadcast target's first dimension to input's first dimension.
75-
You can decide whether calculate the gradient of input and target.
76-
77-
Both the input X and Y can carry the LoD (Level of Details) information,
78-
or not. But the output only shares the LoD with input X.
70+
SquaredL2Distance operator
71+
72+
This operator will calculate the squared L2 distance for the input and
73+
the target. The number of distance values will be equal to the first dimension
74+
of input. First dimension of the target could be equal to the input or to 1.
75+
If the first dimension of target is 1, the operator will broadcast target's
76+
first dimension to input's first dimension. During backward propagation,
77+
the user can decide whether to calculate the gradient of the input or
78+
the target or both.
79+
80+
Both the input X and Y can carry the LoD (Level of Details) information.
81+
However, the output only shares the LoD information with input X.
7982
)DOC");
8083
}
8184
};

paddle/operators/squared_l2_norm_op.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,13 +52,13 @@ class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker {
5252
framework::OpAttrChecker* op_checker)
5353
: framework::OpProtoAndCheckerMaker(proto, op_checker) {
5454
AddInput("X", "(Tensor) The input of squared_l2_norm op.");
55-
AddOutput("Out", "(Float) The output of squared_l2_norm op.");
55+
AddOutput("Out", "(Scalar) The output of squared_l2_norm op.");
5656
AddComment(R"DOC(
5757
SquaredL2Norm Operator.
5858
5959
Computes the squared L2 norm of a tensor.
6060
61-
Out = sum (X ** 2)
61+
$$Out = \sum_{i} X_{i}^2$$
6262
6363
)DOC");
6464
}

paddle/operators/sum_op.cc

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,15 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
4545
public:
4646
SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
4747
: OpProtoAndCheckerMaker(proto, op_checker) {
48-
AddInput("X", "the input tensors of sum operator.").AsDuplicable();
49-
AddOutput("Out", "the output tensor of sum operator.");
48+
AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
49+
.AsDuplicable();
50+
AddOutput("Out", "(Tensor) The output tensor of sum operator.");
5051
AddComment(R"DOC(
51-
Sum the input tensors.
52+
Sum operator.
5253
53-
All the inputs can carry the LoD (Level of Details) information,
54-
or not. But the output only shares the LoD with the first input.
54+
This operator sums the input tensors. All the inputs can carry the
55+
LoD (Level of Details) information. However, the output only shares
56+
the LoD information with the first input.
5557
)DOC");
5658
}
5759
};

0 commit comments

Comments
 (0)