
Commit 9328c3c

Merge pull request #11308 from reyoung/feature/polish_api_ref
Simplize API Reference Documentation
2 parents 17b42fc + dd26329 commit 9328c3c

12 files changed: +152 additions, -123 deletions

paddle/fluid/operators/batch_size_like.h

Lines changed: 7 additions & 7 deletions
@@ -54,18 +54,18 @@ class BatchSizeLikeOp : public framework::OperatorWithKernel {
 class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() final {
-    AddInput("Input",
-             "(Tensor) Tensor "
-             "whose input_dim_idx'th dimension specifies the batch_size");
+    AddInput(
+        "Input",
+        "Tensor whose input_dim_idx'th dimension specifies the batch_size");
     AddOutput("Out",
-              "(Tensor) Tensor of specified shape will be filled "
+              "Tensor of specified shape will be filled "
               "with the specified value");
-    AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
+    AddAttr<std::vector<int>>("shape", "The shape of the output");
     AddAttr<int>("input_dim_idx",
-                 "(int, default 0) The index of input's batch size dimension")
+                 "default 0. The index of input's batch size dimension")
         .SetDefault(0);
     AddAttr<int>("output_dim_idx",
-                 "(int, default 0) The index of output's batch size dimension")
+                 "default 0. The index of output's batch size dimension")
         .SetDefault(0);
     Apply();
   }

paddle/fluid/operators/bilinear_interp_op.cc

Lines changed: 5 additions & 6 deletions
@@ -56,17 +56,16 @@ class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(Tensor) The input tensor of bilinear interpolation, "
+             "The input tensor of bilinear interpolation, "
              "This is a 4-D tensor with shape of (N x C x h x w)");
     AddInput("OutSize",
-             "(Tensor) This is a 1-D tensor with two number. "
+             "This is a 1-D tensor with two number. "
              "The first number is height and the second number is width.")
         .AsDispensable();
-    AddOutput("Out",
-              "(Tensor) The dimension of output is (N x C x out_h x out_w]");
+    AddOutput("Out", "The dimension of output is (N x C x out_h x out_w)");

-    AddAttr<int>("out_h", "(int) output height of bilinear interpolation op.");
-    AddAttr<int>("out_w", "(int) output width of bilinear interpolation op.");
+    AddAttr<int>("out_h", "output height of bilinear interpolation op.");
+    AddAttr<int>("out_w", "output width of bilinear interpolation op.");
     AddComment(R"DOC(
 Bilinear interpolation is an extension of linear interpolation for
 interpolating functions of two variables (e.g. H-direction and
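
The DOC block this hunk touches describes plain 2-D bilinear resizing applied to every N x C slice. A short numpy sketch of that math; the function name and the align-corners style scale (in - 1) / (out - 1) are our assumptions for illustration, since the hunk does not show the op's exact convention:

```python
import numpy as np

def bilinear_resize_2d(img, out_h, out_w):
    """Resize one (h, w) slice with bilinear interpolation."""
    in_h, in_w = img.shape
    scale_h = (in_h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    scale_w = (in_w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0
    out = np.empty((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            y, x = i * scale_h, j * scale_w
            y0, x0 = int(y), int(x)
            y1, x1 = min(y0 + 1, in_h - 1), min(x0 + 1, in_w - 1)
            dy, dx = y - y0, x - x0
            # Weighted sum of the four neighbouring pixels.
            out[i, j] = (img[y0, x0] * (1 - dy) * (1 - dx) +
                         img[y1, x0] * dy * (1 - dx) +
                         img[y0, x1] * (1 - dy) * dx +
                         img[y1, x1] * dy * dx)
    return out

# Upsampling a 2x2 ramp to 3x3 yields evenly spaced intermediate values.
print(bilinear_resize_2d(np.array([[0., 1.], [2., 3.]]), 3, 3))
```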

paddle/fluid/operators/fill_constant_batch_size_like_op.cc

Lines changed: 7 additions & 7 deletions
@@ -32,16 +32,16 @@ class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp {
 class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
  protected:
   void Apply() override {
-    AddAttr<int>("dtype",
-                 "(int, default 5 (FP32)) "
-                 "Output data type")
+    AddAttr<int>(
+        "dtype",
+        "It could be numpy.dtype. Output data type. Default is float32")
         .SetDefault(framework::proto::VarType::FP32);
-    AddAttr<float>("value", "(float, default 0) The value to be filled")
+    AddAttr<float>("value", "default 0. The value to be filled")
         .SetDefault(0.0f);
     AddComment(R"DOC(
-FillConstantBatchSizeLike Operator.
-
-Fill up a variable with specified constant value.
+This function creates a tensor of specified *shape*, *dtype* and batch size,
+and initializes this with a constant supplied in *value*. The batch size is
+obtained from the `input` tensor.

 )DOC");
   }
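
The reworded DOC now reads like the Python API reference it feeds. For orientation, a hedged usage sketch of the layer that wraps this op (assuming the `fluid.layers.fill_constant_batch_size_like` signature of this era):

```python
import paddle.fluid as fluid

# `label` has shape (batch_size, 1); `ones` borrows its 0th dimension,
# so the created tensor has shape (batch_size, 10) and is filled with 1.0.
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
ones = fluid.layers.fill_constant_batch_size_like(
    input=label, shape=[1, 10], dtype='float32', value=1.0)
```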

paddle/fluid/operators/linear_chain_crf_op.cc

Lines changed: 0 additions & 2 deletions
@@ -67,8 +67,6 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker {
         "mini-batch. Note: S is equal to the sequence number in a mini-batch. "
         "The output is no longer a LoDTensor.");
     AddComment(R"DOC(
-LinearChainCRF Operator.
-
 Conditional Random Field defines an undirected probabilistic graph with nodes
 denoting random variables and edges denoting dependencies between these
 variables. CRF learns the conditional probability $P(Y|X)$, where

paddle/fluid/operators/load_op.cc

Lines changed: 4 additions & 11 deletions
@@ -74,25 +74,18 @@ class LoadOp : public framework::OperatorBase {
 class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddOutput("Out", "(Tensor) The tensor need to be loaded");
+    AddOutput("Out", "The tensor need to be loaded");
     AddAttr<bool>(
         "load_as_fp16",
-        "(boolean, default false)"
         "If true, the tensor will be first loaded and then "
         "converted to float16 data type. Otherwise, the tensor will be "
-        "directly loaded without data type conversion.")
+        "directly loaded without data type conversion. Default is false.")
         .SetDefault(false);
     AddAttr<std::string>("file_path",
-                         "(string) "
-                         "Variable will be loaded from \"file_path\".")
+                         R"(Variable will be loaded from "file_path")")
         .AddCustomChecker(
             [](const std::string &path) { return !path.empty(); });
-    AddComment(R"DOC(
-Load Operator.
-
-Load operator will load a tensor variable from disk file.
-
-)DOC");
+    AddComment("Load operator will load a tensor variable from disk file.");
   }
 };
 }  // namespace operators

paddle/fluid/operators/max_sequence_len_op.cc

Lines changed: 9 additions & 4 deletions
@@ -42,10 +42,15 @@ class MaxSeqenceLenOp : public framework::OperatorBase {
 class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("RankTable", "The lod_rank_table.");
-    AddOutput("Out", "The max sequence length.");
-    AddComment(
-        R"DOC(Calculate the max sequence length through lod_rank_table.)DOC");
+    AddInput("RankTable", "Input variable which is a LoDRankTable object");
+    AddOutput("Out", "The max sequence length");
+    AddComment(R"DOC(
+Given a LoDRankTable object, this layer returns the max length of
+a batch of sequences. In fact, a LoDRankTable object contains a list of
+tuples(<sequence index, sequence length>) and the list is already sorted by
+sequence length in descending order, so the operator just returns the
+sequence length of the first tuple element
+)DOC");
   }
 };
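
The rewritten DOC hinges on one invariant: a LoDRankTable keeps its (sequence index, sequence length) tuples sorted by length in descending order, so the maximum is simply the head of the list. In plain Python terms (hypothetical data, not the Paddle API):

```python
# A LoDRankTable conceptually holds (sequence index, sequence length)
# tuples, already sorted by length in descending order.
rank_table_items = [(2, 9), (0, 5), (1, 3)]
max_seq_len = rank_table_items[0][1]  # the op just reads the head: 9
```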

python/paddle/fluid/layers/control_flow.py

Lines changed: 12 additions & 16 deletions
@@ -13,7 +13,7 @@
 # limitations under the License.
 import contextlib

-from layer_function_generator import autodoc
+from layer_function_generator import autodoc, templatedoc
 from tensor import assign, fill_constant
 from .. import core
 from ..framework import Program, Variable, Operator
@@ -721,26 +721,22 @@ def lod_rank_table(x, level=0):
     return table


+@templatedoc()
 def max_sequence_len(rank_table):
-    """Max Sequence Len Operator. Given a LoDRankTable object, this layer
-    returns the max length of a batch of sequences. In fact, a LoDRankTable
-    object contains a list of tuples(<sequence index, sequence length>) and
-    the list is already sorted by sequence length in descending order, so the
-    operator just returns the sequence length of the first tuple element.
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
+    >>>                       lod_level=1)
+    >>> rank_table = layers.lod_rank_table(x=x, level=0)
+    >>> max_seq_len = layers.max_sequence_len(rank_table)

     Args:
-        rank_table (Variable): Input variable which is a LoDRankTable object.
+        rank_table(${rank_table_type}): ${rank_table_comment}.

     Returns:
-        Variable: The max length of sequence.
-
-    Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[10],
-                                  dtype='float32', lod_level=1)
-            rank_table = layers.lod_rank_table(x=x, level=0)
-            max_seq_len = layers.max_sequence_len(rank_table)
+        ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
     res = helper.create_tmp_variable(dtype="int64")

python/paddle/fluid/layers/io.py

Lines changed: 28 additions & 1 deletion
@@ -19,11 +19,12 @@
 from control_flow import BlockGuard
 from ..layer_helper import LayerHelper
 from ..executor import global_scope
+from layer_function_generator import generate_layer_fn, templatedoc

 __all__ = [
     'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file',
     'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
-    'random_data_generator', 'Preprocessor'
+    'random_data_generator', 'Preprocessor', 'load'
 ]


@@ -662,3 +663,29 @@ def __call__(self, *args, **kwargs):
             "sink_var_names": self.sink_var_names
         })
         return monkey_patch_reader_methods(self.reader)
+
+
+@templatedoc()
+def load(out, file_path, load_as_fp16=None):
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
+    >>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
+
+    Args:
+        out(${out_type}): ${out_comment}.
+
+        file_path(${file_path_type}): ${file_path_comment}.
+
+        load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
+
+    Returns:
+        None
+    """
+    helper = LayerHelper("load", **locals())
+    attrs = {"file_path": file_path}
+    if load_as_fp16 is not None:
+        attrs['load_as_fp16'] = load_as_fp16
+    helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)

python/paddle/fluid/layers/layer_function_generator.py

Lines changed: 29 additions & 11 deletions
@@ -224,7 +224,10 @@ def __impl__(func):
     return __impl__


-def templatedoc():
+_inline_math_single_dollar = re.compile(r"\$([^\$]+)\$")
+
+
+def templatedoc(op_type=None):
     """
     Decorator of layer function. It will use the docstring from the layer
     function as the template. The template arguments are:
@@ -238,32 +241,47 @@ def templatedoc():
         Decorated function.
     """

+    def trim_ending_dot(msg):
+        return msg.rstrip('.')
+
+    def escape_inline_math(msg):
+        return _inline_math_single_dollar.sub(repl=r':math:`\1`', string=msg)
+
     def __impl__(func):
-        op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
+        if op_type is None:
+            op_type_name = func.__name__
+        else:
+            op_type_name = op_type
+        op_proto = OpProtoHolder.instance().get_op_proto(op_type_name)
         tmpl = string.Template(func.__doc__)

         comment_lines = op_proto.comment.split("\n")
         comment = ""
         for line in comment_lines:
-            line = line.lstrip()
-            comment += line
-            comment += "\n"
-
-        args = {"comment": comment}
+            line = line.strip()
+            if len(line) != 0:
+                comment += escape_inline_math(line)
+                comment += " "
+            elif len(comment) != 0:
+                comment += "\n \n "
+
+        args = {"comment": trim_ending_dot(comment)}
         for each_input in op_proto.inputs:
             input_name = _convert_(each_input.name)
-            args["{0}_comment".format(input_name)] = each_input.comment
+            args["{0}_comment".format(input_name)] = trim_ending_dot(
+                each_input.comment)
             args["{0}_type".format(input_name)] = "Variable"
         for each_attr in op_proto.attrs:
             input_name = _convert_(each_attr.name)
-            args["{0}_comment".format(input_name)] = each_attr.comment
+            args["{0}_comment".format(input_name)] = trim_ending_dot(
+                each_attr.comment)
             args["{0}_type".format(input_name)] = _type_to_str_(each_attr.type)

         for each_opt in op_proto.outputs:
             output_name = _convert_(each_opt.name)
-            args["{0}_comment".format(output_name)] = each_opt.comment
+            args["{0}_comment".format(output_name)] = trim_ending_dot(
+                each_opt.comment)
             args["{0}_type".format(output_name)] = "Variable"
-
         func.__doc__ = tmpl.substitute(args)
         return func
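
To make the decorator's flow concrete, here is a self-contained sketch of the same string.Template mechanics with the op-proto lookup stubbed out (the render_doc helper and its sample inputs are ours, not Paddle's):

```python
import re
import string

_inline_math = re.compile(r"\$([^\$]+)\$")

def render_doc(template, comment, arg_comments):
    """Fill ${comment} and ${name_comment} placeholders the way
    templatedoc does: escape $...$ as :math:`...`, trim trailing dots."""
    args = {"comment": _inline_math.sub(r':math:`\1`', comment).rstrip('.')}
    for name, text in arg_comments.items():
        args["{0}_comment".format(name)] = text.rstrip('.')
    return string.Template(template).substitute(args)

doc = render_doc(
    template="${comment}\n\nArgs:\n    x(Variable): ${x_comment}.",
    comment="Computes $y = 2x$.",
    arg_comments={"x": "The input tensor."})
print(doc)
# Computes :math:`y = 2x`
#
# Args:
#     x(Variable): The input tensor.
```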

python/paddle/fluid/layers/learning_rate_scheduler.py

Lines changed: 19 additions & 17 deletions
@@ -11,6 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+When training a model, it's often useful to decay the
+learning rate during training process, this is called
+learning_rate_decay. There are many strategies to do
+this, this module will provide some classical method.
+User can also implement their own learning_rate_decay
+strategy according to this module.
+"""

 import control_flow
 import nn
@@ -22,14 +30,6 @@
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
     'polynomial_decay', 'piecewise_decay', 'noam_decay'
 ]
-"""
-When training a model, it's often useful to decay the
-learning rate during training process, this is called
-learning_rate_decay. There are many strategies to do
-this, this module will provide some classical method.
-User can also implement their own learning_rate_decay
-strategy according to this module.
-"""


 def _decay_step_counter(begin=0):
@@ -41,18 +41,20 @@ def _decay_step_counter(begin=0):


 def noam_decay(d_model, warmup_steps):
-    """Apply decay to learning rate.
-    ```python
-    lr_value = np.power(d_model, -0.5) * np.min([
-        np.power(current_steps, -0.5),
-        np.power(warmup_steps, -1.5) * current_steps
-    ])
-    ```
+    """
+    Noam decay method. The numpy implementation of noam decay as follows.
+
+    >>> import numpy as np
+    >>> lr_value = np.power(d_model, -0.5) * np.min([
+    >>>     np.power(current_steps, -0.5),
+    >>>     np.power(warmup_steps, -1.5) * current_steps])
+
+    Please reference `attention is all you need
+    <https://arxiv.org/pdf/1706.03762.pdf>`_.

     Args:
         d_model(Variable): The dimensionality of input and output of model.
-            Reference: attention is all you need
-            https://arxiv.org/pdf/1706.03762.pdf
+
         warmup_steps(Variable): A super parameter.

     Returns: