
Commit c2e8f40

Merge pull request #11492 from dzhwinter/doc/api1
[API Reference] fix some typo in layers
2 parents 20e5ef6 + f4a49cb

File tree

6 files changed: 111 additions, 25 deletions


paddle/fluid/operators/clip_by_norm_op.cc

Lines changed: 10 additions & 1 deletion
@@ -54,10 +54,19 @@ be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as
 shown in the following formula:

 $$
-Out = \frac{max\_norm * X}{norm(X)},
+Out = \\frac{max\\_norm * X}{norm(X)},
 $$

 where $norm(X)$ represents the L2 norm of $X$.
+
+Examples:
+    .. code-block:: python
+
+        data = fluid.layer.data(
+            name='data', shape=[2, 4, 6], dtype='float32')
+        reshaped = fluid.layers.clip_by_norm(
+            x=data, max_norm=0.5)
+
 )DOC");
   }
 };
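The snippet added in this hunk writes fluid.layer.data, but the data layer lives under fluid.layers, so the example as committed would not run. A runnable rendering of the same example, with the module path corrected (the correction is ours, not part of the patch):

.. code-block:: python

    import paddle.fluid as fluid

    # Clip the L2 norm of 'data' down to max_norm = 0.5.
    data = fluid.layers.data(name='data', shape=[2, 4, 6], dtype='float32')
    clipped = fluid.layers.clip_by_norm(x=data, max_norm=0.5)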

paddle/fluid/operators/uniform_random_batch_size_like_op.cc

Lines changed: 2 additions & 2 deletions
@@ -35,10 +35,10 @@ class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
 protected:
   void Apply() override {
     AddComment(R"DOC(
-Uniform random operator
+UniformRandomBatchSizeLike operator.

 This operator initializes a tensor with the same batch_size as the Input tensor
-with random values sampled from a uniform distribution.
+with random values sampled from a uniform distribution.

 )DOC");
     AddAttr<float>("min",
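For context, this op is normally reached from Python through its generated layers wrapper. A minimal sketch, assuming the fluid.layers.uniform_random_batch_size_like binding that fluid exposes for this operator:

.. code-block:: python

    import paddle.fluid as fluid

    # The output copies the batch dimension of 'input' and is filled
    # with values drawn uniformly from [min, max).
    input = fluid.layers.data(name='input', shape=[13, 11], dtype='float32')
    out = fluid.layers.uniform_random_batch_size_like(
        input=input, shape=[-1, 11], min=-1.0, max=1.0)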

python/paddle/fluid/framework.py

Lines changed: 39 additions & 2 deletions
@@ -1034,6 +1034,37 @@ def clone_variable(self, var):


 class Program(object):
+    """
+    Python Program. Beneath it is a ProgramDesc, which is used for
+    create c++ Program. A program is a self-contained programing
+    language like container. It has at least one Block, when the
+    control flow op like conditional_block, while_op is included,
+    it will contains nested block.
+    Please reference the framework.proto for details.
+
+    Notes: we have default_startup_program and default_main_program
+    by default, a pair of them will shared the parameters.
+    The default_startup_program only run once to initialize parameters,
+    default_main_program run in every minibatch and adjust the weights.
+
+    Args:
+        None
+
+    Returns:
+        Python Program
+
+    Examples:
+        .. code-block:: python
+
+            main_program = Program()
+            startup_program = Program()
+            with fluid.program_guard(main_program=main_program, startup_program=startup_program):
+                fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
+                fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
+                fluid.layers.fc(name="fc", shape=[10], dtype='float32', act="relu")
+
+    """
+
     def __init__(self):
         self.desc = core.ProgramDesc()
         self.blocks = [Block(self, 0)]

@@ -1099,6 +1130,8 @@ def get_desc(self):

     def clone(self, for_test=False):
         """Clone the Program object
+        Args:
+            for_test(bool): indicate whether clone for test.

         Set for_test to False when we want to clone the program for training.
         Set for_test to True when we want to clone the program for testing.

@@ -1109,8 +1142,9 @@ def clone(self, for_test=False):
         the is_test attributes in these operators will be set to True for
         testing purposes, otherwise, they remain unchanged.

-        Returns(Program):
-            The cloned Program object.
+        Returns:
+            Program: The cloned Program object.
+
         """
         if for_test:
             p = self.inference_optimize()

@@ -1228,6 +1262,7 @@ def sync_with_cpp(self):
     def copy_param_info_from(self, other):
         """
         Copy the information of parameters from other program.
+
         Args:
             other(Program): Other program

@@ -1246,6 +1281,7 @@ def copy_param_info_from(self, other):
     def copy_data_info_from(self, other):
         """
         Copy the information of data variables from other program.
+
         Args:
             other(Program): Other program

@@ -1299,6 +1335,7 @@ def __str__(self):
     def to_string(self, throw_on_error, with_details=False):
         """
         To debug string.
+
         Args:
             throw_on_error(bool): raise exception when self is not initialized
                 when throw_on_error is True
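The clone documentation above gains an Args section but still no example. A short usage sketch, illustrative only, following the common fluid pattern of cloning the main program for evaluation before optimizer ops are appended:

.. code-block:: python

    import paddle.fluid as fluid

    main_program = fluid.default_main_program()
    # Clone for evaluation: operators such as batch_norm and dropout
    # in the clone get their is_test attribute set to True.
    test_program = main_program.clone(for_test=True)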

python/paddle/fluid/layers/control_flow.py

Lines changed: 2 additions & 3 deletions
@@ -902,8 +902,7 @@ def increment(x, value=1.0, in_place=True):
         in_place (bool): If the increment should be performed in-place.

     Returns:
-        Variable: The tensor variable storing the transformation of
-            element-wise increment of each value in the input.
+        Variable: The elementwise-incremented object.

     Examples:
         .. code-block:: python

@@ -945,7 +944,7 @@ def array_write(x, i, array=None):
         Variable: The output LOD_TENSOR_ARRAY where the input tensor is written.

     Examples:
-        .. code-block::python
+        .. code-block:: python

             tmp = fluid.layers.zeros(shape=[10], dtype='int32')
             i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)

python/paddle/fluid/layers/metric.py

Lines changed: 25 additions & 1 deletion
@@ -27,8 +27,32 @@


 def accuracy(input, label, k=1, correct=None, total=None):
     """
+    accuracy layer.
+    Refer to the https://en.wikipedia.org/wiki/Precision_and_recall
+
     This function computes the accuracy using the input and label.
-    The output is the top k inputs and their indices.
+    If the correct label occurs in top k predictions, then correct will increment by one.
+    Note: the dtype of accuracy is determined by input. the input and label dtype can be different.
+
+    Args:
+        input(Variable): The input of accuracy layer, which is the predictions of network.
+            Carry LoD information is supported.
+        label(Variable): The label of dataset.
+        k(int): The top k predictions for each class will be checked.
+        correct(Variable): The correct predictions count.
+        total(Variable): The total entries count.
+
+    Returns:
+        Variable: The correct rate.
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(name="data", shape=[-1, 32, 32], dtype="float32")
+            label = fluid.layers.data(name="data", shape=[-1,1], dtype="int32")
+            predict = fluid.layers.fc(input=data, size=10)
+            acc = fluid.layers.accuracy(input=predict, label=label, k=5)
+
     """
     helper = LayerHelper("accuracy", **locals())
     topk_out, topk_indices = nn.topk(input, k=k)
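One caveat about the example just added: both data layers are declared with name="data", which would collide in a real program. A de-duplicated sketch (the name "label" is our substitution, not part of the patch):

.. code-block:: python

    # Two distinctly named input layers, then top-5 accuracy.
    data = fluid.layers.data(name="data", shape=[-1, 32, 32], dtype="float32")
    label = fluid.layers.data(name="label", shape=[-1, 1], dtype="int32")
    predict = fluid.layers.fc(input=data, size=10)
    acc = fluid.layers.accuracy(input=predict, label=label, k=5)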

python/paddle/fluid/layers/nn.py

Lines changed: 33 additions & 16 deletions
@@ -794,11 +794,14 @@ def linear_chain_crf(input, label, param_attr=None):

     Args:
         input(${emission_type}): ${emission_comment}
+        input(${transition_type}): ${transition_comment}
         label(${label_type}): ${label_comment}
         param_attr(ParamAttr): The attribute of the learnable parameter.

     Returns:
-        ${log_likelihood_comment}
+        output(${emission_exps_type}): ${emission_exps_comment} \n
+        output(${transition_exps_type}): ${transition_exps_comment} \n
+        output(${log_likelihood_type}): ${log_likelihood_comment}

     """
     helper = LayerHelper('linear_chain_crf', **locals())
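The Args/Returns sections above still lack a code example. A minimal call sketch, illustrative only; the emission input is assumed to be per-token scores over 10 tags, and the parameter name 'crfw' is our placeholder:

.. code-block:: python

    import paddle.fluid as fluid

    # Per-token emission scores for 10 tags, plus gold labels.
    emission = fluid.layers.data(name='emission', shape=[10], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    crf_cost = fluid.layers.linear_chain_crf(
        input=emission, label=label,
        param_attr=fluid.ParamAttr(name='crfw'))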
@@ -1131,10 +1134,6 @@ def sequence_conv(input,
         Variable: output of sequence_conv
     """

-    # FIXME(dzh) : want to unify the argument of python layer
-    # function. So we ignore some unecessary attributes.
-    # such as, padding_trainable, context_start.
-
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -2068,15 +2067,37 @@ def layer_norm(input,

 def beam_search_decode(ids, scores, name=None):
     """
-    ${beam_search_decode}
+    Beam Search Decode
+
+    This layers is to pack the output of beam search layer into sentences and
+    associated scores. It is usually called after the beam search layer.
+    Typically, the output of beam search layer is a tensor of selected ids, with
+    a tensor of the score of each id. Beam search layer's output ids, however,
+    are generated directly during the tree search, and they are stacked by each
+    level of the search tree. Thus we need to reorganize them into sentences,
+    based on the score of each id. This layer takes the output of beam search
+    layer as input and repack them into sentences.

     Args:
-        ids (Variable): ${ids_comment}
-        scores (Variable): ${scores_comment}
+        ids (Variable): The selected ids, output of beam search layer.
+        scores (Variable): The associated scores of the ids, out put of beam
+            search layer.
         name (str): The name of this layer. It is optional.

     Returns:
-        tuple: a tuple of two output variable: sentence_ids, sentence_scores
+        tuple(Variable): a tuple of two output tensors: sentence_ids, sentence_scores.
+            sentence_ids is a tensor with shape [size, length], where size is the
+            beam size of beam search, and length is the length of each sentence.
+            Note that the length of sentences may vary.
+            sentence_scores is a tensor with the same shape as sentence_ids.
+
+    Examples:
+        .. code-block:: python
+
+            ids, scores = fluid.layers.beam_search(
+                pre_ids, ids, scores, beam_size, end_id)
+            sentence_ids, sentence_scores = fluid.layers.beam_search_decode(
+                ids, scores)
     """
     helper = LayerHelper('beam_search_decode', **locals())
     sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
@@ -2957,7 +2978,7 @@ def split(input, num_or_sections, dim=-1, name=None):
         will be named automatically.

     Returns:
-        List: The list of segmented tensor variables.
+        list(Variable): The list of segmented tensor variables.

     Examples:
         .. code-block:: python
@@ -3690,8 +3711,6 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None):

     Examples:

-        As an example:
-
         .. code-block:: text

             Given:

@@ -3735,7 +3754,7 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None):

             output.lod = [[4, 4]]

-        The simple usage is:
+        Examples:

         .. code-block:: python
@@ -4220,9 +4239,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):

     .. math::

-        Output(i, x, y) = Input(i, x, y) / \left(
-            k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)}
-            (Input(j, x, y))^2 \right)^{\beta}
+        Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C, c + n/2)}_{j = \\max(0, c - n/2)}(Input(j, x, y))^2\\right)^{\\beta}

     In the above equation:
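For the LRN formula being repaired above, a minimal call sketch (illustrative only; fluid's lrn expects NCHW input, and the defaults shown mirror the signature in this hunk):

.. code-block:: python

    import paddle.fluid as fluid

    # Normalize each activation across n neighboring channels.
    data = fluid.layers.data(name='data', shape=[3, 112, 112], dtype='float32')
    out = fluid.layers.lrn(input=data, n=5, k=1.0, alpha=1e-4, beta=0.75)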
