Skip to content

Commit b000e0d

Browse files
committed
Simplify API Reference Documentation
1 parent 259e63d commit b000e0d

File tree

8 files changed

+84
-80
lines changed

8 files changed

+84
-80
lines changed

paddle/fluid/operators/batch_size_like.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -54,18 +54,18 @@ class BatchSizeLikeOp : public framework::OperatorWithKernel {
5454
class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker {
5555
public:
5656
void Make() final {
57-
AddInput("Input",
58-
"(Tensor) Tensor "
59-
"whose input_dim_idx'th dimension specifies the batch_size");
57+
AddInput(
58+
"Input",
59+
"Tensor whose input_dim_idx'th dimension specifies the batch_size");
6060
AddOutput("Out",
61-
"(Tensor) Tensor of specified shape will be filled "
61+
"Tensor of specified shape will be filled "
6262
"with the specified value");
63-
AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
63+
AddAttr<std::vector<int>>("shape", "The shape of the output");
6464
AddAttr<int>("input_dim_idx",
65-
"(int, default 0) The index of input's batch size dimension")
65+
"default 0. The index of input's batch size dimension")
6666
.SetDefault(0);
6767
AddAttr<int>("output_dim_idx",
68-
"(int, default 0) The index of output's batch size dimension")
68+
"default 0. The index of output's batch size dimension")
6969
.SetDefault(0);
7070
Apply();
7171
}

paddle/fluid/operators/fill_constant_batch_size_like_op.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,16 @@ class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp {
3232
class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
3333
protected:
3434
void Apply() override {
35-
AddAttr<int>("dtype",
36-
"(int, default 5 (FP32)) "
37-
"Output data type")
35+
AddAttr<int>(
36+
"dtype",
37+
"It could be numpy.dtype. Output data type. Default is float32")
3838
.SetDefault(framework::proto::VarType::FP32);
39-
AddAttr<float>("value", "(float, default 0) The value to be filled")
39+
AddAttr<float>("value", "default 0. The value to be filled")
4040
.SetDefault(0.0f);
4141
AddComment(R"DOC(
42-
FillConstantBatchSizeLike Operator.
43-
44-
Fill up a variable with specified constant value.
42+
This function creates a tensor of specified *shape*, *dtype* and batch size,
43+
and initializes this with a constant supplied in *value*. The batch size is
44+
obtained from the `input` tensor.
4545
4646
)DOC");
4747
}

paddle/fluid/operators/load_op.cc

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -74,25 +74,18 @@ class LoadOp : public framework::OperatorBase {
7474
class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker {
7575
public:
7676
void Make() override {
77-
AddOutput("Out", "(Tensor) The tensor need to be loaded");
77+
AddOutput("Out", "The tensor need to be loaded");
7878
AddAttr<bool>(
7979
"load_as_fp16",
80-
"(boolean, default false)"
8180
"If true, the tensor will be first loaded and then "
8281
"converted to float16 data type. Otherwise, the tensor will be "
83-
"directly loaded without data type conversion.")
82+
"directly loaded without data type conversion. Default is false.")
8483
.SetDefault(false);
8584
AddAttr<std::string>("file_path",
86-
"(string) "
87-
"Variable will be loaded from \"file_path\".")
85+
R"(Variable will be loaded from "file_path")")
8886
.AddCustomChecker(
8987
[](const std::string &path) { return !path.empty(); });
90-
AddComment(R"DOC(
91-
Load Operator.
92-
93-
Load operator will load a tensor variable from disk file.
94-
95-
)DOC");
88+
AddComment("Load operator will load a tensor variable from disk file.");
9689
}
9790
};
9891
} // namespace operators

paddle/fluid/operators/max_sequence_len_op.cc

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,10 +42,15 @@ class MaxSeqenceLenOp : public framework::OperatorBase {
4242
class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
4343
public:
4444
void Make() override {
45-
AddInput("RankTable", "The lod_rank_table.");
46-
AddOutput("Out", "The max sequence length.");
47-
AddComment(
48-
R"DOC(Calculate the max sequence length through lod_rank_table.)DOC");
45+
AddInput("RankTable", "Input variable which is a LoDRankTable object");
46+
AddOutput("Out", "The max sequence length");
47+
AddComment(R"DOC(
48+
Given a LoDRankTable object, this layer returns the max length of
49+
a batch of sequences. In fact, a LoDRankTable object contains a list of
50+
tuples(<sequence index, sequence length>) and the list is already sorted by
51+
sequence length in descending order, so the operator just returns the
52+
sequence length of the first tuple element
53+
)DOC");
4954
}
5055
};
5156

python/paddle/fluid/layers/control_flow.py

Lines changed: 12 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# limitations under the License.
1414
import contextlib
1515

16-
from layer_function_generator import autodoc
16+
from layer_function_generator import autodoc, templatedoc
1717
from tensor import assign, fill_constant
1818
from .. import core
1919
from ..framework import Program, Variable, Operator
@@ -721,26 +721,22 @@ def lod_rank_table(x, level=0):
721721
return table
722722

723723

724+
@templatedoc()
724725
def max_sequence_len(rank_table):
725-
"""Max Sequence Len Operator. Given a LoDRankTable object, this layer
726-
returns the max length of a batch of sequences. In fact, a LoDRankTable
727-
object contains a list of tuples(<sequence index, sequence length>) and
728-
the list is already sorted by sequence length in descending order, so the
729-
operator just returns the sequence length of the first tuple element.
726+
"""
727+
${comment}
728+
729+
>>> import paddle.fluid as fluid
730+
>>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
731+
>>> lod_level=1)
732+
>>> rank_table = layers.lod_rank_table(x=x, level=0)
733+
>>> max_seq_len = layers.max_sequence_len(rank_table)
730734
731735
Args:
732-
rank_table (Variable): Input variable which is a LoDRankTable object.
736+
rank_table(${rank_table_type}): ${rank_table_comment}.
733737
734738
Returns:
735-
Variable: The max length of sequence.
736-
737-
Examples:
738-
.. code-block:: python
739-
740-
x = fluid.layers.data(name='x', shape=[10],
741-
dtype='float32', lod_level=1)
742-
rank_table = layers.lod_rank_table(x=x, level=0)
743-
max_seq_len = layers.max_sequence_len(rank_table)
739+
(${out_type}): ${out_comment}
744740
"""
745741
helper = LayerHelper("max_seqence_len", **locals())
746742
res = helper.create_tmp_variable(dtype="int64")

python/paddle/fluid/layers/io.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,12 @@
1919
from control_flow import BlockGuard
2020
from ..layer_helper import LayerHelper
2121
from ..executor import global_scope
22+
from layer_function_generator import generate_layer_fn, templatedoc
2223

2324
__all__ = [
2425
'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file',
2526
'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
26-
'random_data_generator', 'Preprocessor'
27+
'random_data_generator', 'Preprocessor', 'load'
2728
]
2829

2930

@@ -662,3 +663,29 @@ def __call__(self, *args, **kwargs):
662663
"sink_var_names": self.sink_var_names
663664
})
664665
return monkey_patch_reader_methods(self.reader)
666+
667+
668+
@templatedoc()
669+
def load(out, file_path, load_as_fp16=None):
670+
"""
671+
${comment}
672+
673+
>>> import paddle.fluid as fluid
674+
>>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
675+
>>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
676+
677+
Args:
678+
out(${out_type}): ${out_comment}.
679+
680+
file_path(${file_path_type}): ${file_path_comment}.
681+
682+
load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
683+
684+
Returns:
685+
None
686+
"""
687+
helper = LayerHelper("load", **locals())
688+
attrs = {"file_path": file_path}
689+
if load_as_fp16 is not None:
690+
attrs['load_as_fp16'] = load_as_fp16
691+
helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)

python/paddle/fluid/layers/layer_function_generator.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,6 @@ def __impl__(func):
263263
output_name = _convert_(each_opt.name)
264264
args["{0}_comment".format(output_name)] = each_opt.comment
265265
args["{0}_type".format(output_name)] = "Variable"
266-
267266
func.__doc__ = tmpl.substitute(args)
268267
return func
269268

python/paddle/fluid/layers/tensor.py

Lines changed: 17 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
from ..framework import Variable
1919
from ..initializer import Constant, force_init_on_cpu
2020
from ..core import VarDesc
21+
from layer_function_generator import templatedoc
2122
import numpy
2223

2324
__all__ = [
@@ -266,37 +267,36 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
266267
return out
267268

268269

270+
@templatedoc()
269271
def fill_constant_batch_size_like(input,
270272
shape,
271273
dtype,
272274
value,
273275
input_dim_idx=0,
274276
output_dim_idx=0):
275277
"""
276-
**fill_constant_batch_size_like**
277-
278-
This function creates a tensor of specified *shape*, *dtype* and batch size,
279-
and initializes this with a constant supplied in *value*. The batch size is
280-
obtained from the `input` tensor.
278+
${comment}
281279
282280
It also sets *stop_gradient* to True.
283281
282+
>>> data = fluid.layers.fill_constant_batch_size_like(
283+
>>> input=like, shape=[1], value=0, dtype='int64')
284+
284285
Args:
285-
input(Variable): Tensor whose dimensions will be used to get batch size
286-
shape(tuple|list|None): Shape of output tensor
287-
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
288-
value(float): Constant value to initialize the output tensor
289-
input_dim_idx(int): Index of input's batch size dimension
290-
output_dim_idx(int): Index of output's batch size dimension
286+
input(${input_type}): ${input_comment}.
291287
292-
Returns:
293-
Variable: The tensor variable storing the output
288+
shape(${shape_type}): ${shape_comment}.
294289
295-
Examples:
296-
.. code-block:: python
290+
dtype(${dtype_type}): ${dtype_comment}.
291+
292+
value(${value_type}): ${value_comment}.
297293
298-
data = fluid.layers.fill_constant_batch_size_like(
299-
input=like, shape=[1], value=0, dtype='int64')
294+
input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.
295+
296+
output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.
297+
298+
Returns:
299+
${out_comment}
300300
"""
301301
helper = LayerHelper("fill_constant_batch_size_like", **locals())
302302
out = helper.create_tmp_variable(dtype=dtype)
@@ -437,22 +437,6 @@ def save_combine(x, file_path, overwrite=True):
437437
"overwrite": overwrite})
438438

439439

440-
def load(out, file_path):
441-
"""
442-
Loads a variable from a given file.
443-
444-
Args:
445-
out(variable): The variable to be read from the disk file.
446-
file_path(str): The path of the disk file.
447-
"""
448-
helper = LayerHelper("load", **locals())
449-
helper.append_op(
450-
type="load",
451-
inputs={},
452-
output={"Out": out},
453-
args={"file_path": file_path})
454-
455-
456440
def load_combine(out, file_path):
457441
"""
458442
Loads a list of variables from a single file.

0 commit comments

Comments
 (0)