Commit cc1239f

Update some doc about API reference. (#11495)
* Update some doc about layers' API.
* Fix format.
* Fix example bug in random_data_generator.
* Fix example bug in dropout.
* Follow review comments and apply small fixes to some examples.
1 parent fd77126 commit cc1239f

File tree: 6 files changed, +104 -59 lines

paddle/fluid/operators/activation_op.cc

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ Sigmoid Activation Operator
 __attribute__((unused)) constexpr char LogSigmoidDoc[] = R"DOC(
 Logsigmoid Activation Operator

-$$out = \log \frac{1}{1 + e^{-x}}$$
+$$out = \\log \\frac{1}{1 + e^{-x}}$$

 )DOC";
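The escaped form renders in Sphinx as out = log(1 / (1 + e^{-x})). As a quick sanity check of that formula (not part of the commit), a numerically stable NumPy sketch:

.. code-block:: python

    import numpy as np

    def log_sigmoid(x):
        # out = log(1 / (1 + e^{-x})) = -log(1 + e^{-x});
        # np.logaddexp(0, -x) evaluates log(1 + e^{-x}) without overflow.
        return -np.logaddexp(0.0, -x)

    print(log_sigmoid(np.array([-10.0, 0.0, 10.0])))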

paddle/fluid/operators/detection/box_coder_op.cc

Lines changed: 27 additions & 14 deletions
@@ -106,23 +106,36 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker {
              "and M represents the number of deocded boxes.");

     AddComment(R"DOC(
-Bounding Box Coder Operator.
+
+Bounding Box Coder.
+
 Encode/Decode the target bounding box with the priorbox information.
+
 The Encoding schema described below:
-ox = (tx - px) / pw / pxv
-oy = (ty - py) / ph / pyv
-ow = log(abs(tw / pw)) / pwv
-oh = log(abs(th / ph)) / phv
+
+ox = (tx - px) / pw / pxv
+
+oy = (ty - py) / ph / pyv
+
+ow = log(abs(tw / pw)) / pwv
+
+oh = log(abs(th / ph)) / phv
+
 The Decoding schema described below:
-ox = (pw * pxv * tx * + px) - tw / 2
-oy = (ph * pyv * ty * + py) - th / 2
-ow = exp(pwv * tw) * pw + tw / 2
-oh = exp(phv * th) * ph + th / 2
-where tx, ty, tw, th denote the target box's center coordinates, width and
-height respectively. Similarly, px, py, pw, ph denote the priorbox's(anchor)
-center coordinates, width and height. pxv, pyv, pwv, phv denote the variance
-of the priorbox and ox, oy, ow, oh denote the encoded/decoded coordinates,
-width and height.
+
+ox = (pw * pxv * tx * + px) - tw / 2
+
+oy = (ph * pyv * ty * + py) - th / 2
+
+ow = exp(pwv * tw) * pw + tw / 2
+
+oh = exp(phv * th) * ph + th / 2
+
+where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, width
+and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote the
+priorbox's (anchor) center coordinates, width and height. `pxv`, `pyv`, `pwv`,
+`phv` denote the variance of the priorbox and `ox`, `oy`, `ow`, `oh` denote the
+encoded/decoded coordinates, width and height.
 )DOC");
   }
 };
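For intuition, a minimal NumPy sketch (not part of the commit) of the encoding schema quoted above; `encode_box` is a hypothetical helper, and boxes are assumed to be in (center x, center y, width, height) form:

.. code-block:: python

    import numpy as np

    def encode_box(t, p, pv):
        # t: target box, p: prior box, pv: prior-box variances,
        # each as (cx, cy, w, h) per the DOC string's notation.
        tx, ty, tw, th = t
        px, py, pw, ph = p
        pxv, pyv, pwv, phv = pv
        ox = (tx - px) / pw / pxv
        oy = (ty - py) / ph / pyv
        ow = np.log(np.abs(tw / pw)) / pwv
        oh = np.log(np.abs(th / ph)) / phv
        return np.array([ox, oy, ow, oh])

    print(encode_box((5., 5., 4., 2.), (4., 4., 4., 4.), (.1, .1, .2, .2)))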

paddle/fluid/operators/gaussian_random_batch_size_like_op.cc

Lines changed: 6 additions & 3 deletions
@@ -36,11 +36,12 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
   void Apply() override {
     AddAttr<float>("mean",
                    "(float, default 0.0) "
-                   "mean of random tensor.")
+                   "The mean (or center) of the gaussian distribution.")
         .SetDefault(.0f);
     AddAttr<float>("std",
                    "(float, default 1.0) "
-                   "std of random tensor.")
+                   "The standard deviation (std, or spread) of the "
+                   "gaussian distribution.")
         .SetDefault(1.0f);
     AddAttr<int>("seed",
                  "(int, default 0) "
@@ -55,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
         .SetDefault(framework::proto::VarType::FP32);

     AddComment(R"DOC(
-GaussianRandom Operator.

 Used to initialize tensors with gaussian random generator.
+The defalut mean of the distribution is 0. and defalut standard
+deviation (std) of the distribution is 1.. Uers can set mean and std
+by input arguments.
 )DOC");
   }
 };
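A rough NumPy analogue (not part of the commit) of the behaviour the DOC string describes, sampling a tensor from a gaussian with configurable mean and std:

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(seed=0)  # fixed seed, like the "seed" attr
    out = rng.normal(loc=0.0, scale=1.0, size=(8, 32))  # mean=0., std=1.
    print(out.mean(), out.std())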

python/paddle/fluid/layers/io.py

Lines changed: 8 additions & 8 deletions
@@ -378,16 +378,16 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
         Variable: A Reader Variable from which we can get random data.

     Examples:
-        .. code-block:: python

-            reader = fluid.layers.io.random_data_generator(
-                low=0.0,
-                high=1.0,
-                shapes=[(3,224,224), (1)],
-                lod_levels=[0, 0])
+        .. code-block:: python

-            # Via the reader, we can use 'read_file' layer to get data:
-            image, label = fluid.layers.io.read_file(reader)
+            reader = fluid.layers.random_data_generator(
+                low=0.0,
+                high=1.0,
+                shapes=[[3,224,224], [1]],
+                lod_levels=[0, 0])
+            # Via the reader, we can use 'read_file' layer to get data:
+            image, label = fluid.layers.read_file(reader)
     """
     dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
     shape_concat = []
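For reference, a rough NumPy analogue (not part of the commit) of what one batch from this reader looks like under the example's arguments; shapes and the float32 dtype follow the docstring:

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng()
    # Uniform samples in [low, high), one array per entry of `shapes`.
    image = rng.uniform(low=0.0, high=1.0, size=(3, 224, 224)).astype("float32")
    label = rng.uniform(low=0.0, high=1.0, size=(1,)).astype("float32")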

python/paddle/fluid/layers/nn.py

Lines changed: 44 additions & 31 deletions
@@ -364,8 +364,7 @@ def dynamic_lstm(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                               "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-                              Choices = ["sigmoid", "tanh",
-                              "relu", "identity"],
+                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                               default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
@@ -540,27 +539,31 @@ def dynamic_lstmp(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                               "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-                              Choices = ["sigmoid", "tanh",
-                              "relu", "identity"],
+                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                               default "tanh".
         proj_activation(str): The activation for projection output.
-                              Choices = ["sigmoid", "tanh",
-                              "relu", "identity"],
+                              Choices = ["sigmoid", "tanh", "relu", "identity"],
                               default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.

     Returns:
-        tuple: The projection of hidden state, and cell state of LSTMP. The \
-            shape of projection is (T x P), for the cell state which is \
-            (T x D), and both LoD is the same with the `input`.
+        tuple: A tuple of two output variable: the projection of hidden state, \
+            and cell state of LSTMP. The shape of projection is (T x P), \
+            for the cell state which is (T x D), and both LoD is the same \
+            with the `input`.

     Examples:
+
         .. code-block:: python

+            dict_dim, emb_dim = 128, 64
+            data = fluid.layers.data(name='sequence', shape=[1],
+                                     dtype='int32', lod_level=1)
+            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
             hidden_dim, proj_dim = 512, 256
-            fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4,
+            fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
                                      act=None, bias_attr=None)
             proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out,
                                                      size=hidden_dim * 4,
@@ -626,10 +629,10 @@ def dynamic_gru(input,
                 candidate_activation='tanh',
                 h_0=None):
     """
-    **Dynamic GRU Layer**
+    **Gated Recurrent Unit (GRU) Layer**

     Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on
-    Sequence Modeling <https://arxiv.org/abs/1412.3555>`_
+    Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ .

     The formula is as follows:
@@ -676,17 +679,25 @@ def dynamic_gru(input,
             Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid".
         candidate_activation(str): The activation for candidate hidden state.
             Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh".
-        h_0 (Variable): The hidden output of the first time step.
+        h_0 (Variable): This is initial hidden state. If not set, default is
+            zero. This is a tensor with shape (N x D), where N is the number of
+            total time steps of input mini-batch feature and D is the hidden
+            size.

     Returns:
         Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \
-            and lod is the same with the input.
+            and sequence length is the same with the input.

     Examples:
+
         .. code-block:: python

+            dict_dim, emb_dim = 128, 64
+            data = fluid.layers.data(name='sequence', shape=[1],
+                                     dtype='int32', lod_level=1)
+            emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
             hidden_dim = 512
-            x = fluid.layers.fc(input=data, size=hidden_dim * 3)
+            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
             hidden = fluid.layers.dynamic_gru(input=x, dim=hidden_dim)
     """
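For intuition, a single generic GRU step in NumPy, following the cited paper rather than Paddle's kernel; `gru_step` and its weight names are hypothetical:

.. code-block:: python

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def gru_step(h_prev, x_u, x_r, x_c, w_u, w_r, w_c):
        u = sigmoid(x_u + h_prev @ w_u)        # update gate
        r = sigmoid(x_r + h_prev @ w_r)        # reset gate
        c = np.tanh(x_c + (r * h_prev) @ w_c)  # candidate hidden state
        return (1.0 - u) * h_prev + u * c      # new hidden state h_t

    D = 4
    rng = np.random.default_rng(0)
    xs = [rng.normal(size=(1, D)) for _ in range(3)]  # projected input slices
    ws = [rng.normal(size=(D, D)) for _ in range(3)]  # recurrent weights
    h = gru_step(np.zeros((1, D)), *xs, *ws)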

@@ -924,13 +935,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):

     Drop or keep each element of `x` independently. Dropout is a regularization
     technique for reducing overfitting by preventing neuron co-adaption during
-    training. The dropout operator randomly set (according to the given dropout
+    training. The dropout operator randomly sets (according to the given dropout
     probability) the outputs of some units to zero, while others are remain
     unchanged.

     Args:
-        x (Variable): The input tensor.
-        dropout_prob (float): Probability of setting units to zero.
+        x (Variable): The input tensor variable.
+        dropout_prob (float): Probability of setting units to zero.
         is_test (bool): A flag indicating whether it is in test phrase or not.
         seed (int): A Python integer used to create random seeds. If this
             parameter is set to None, a random seed is used.
@@ -940,13 +951,14 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
             will be named automatically.

     Returns:
-        Variable: A tensor variable.
+        Variable: A tensor variable is the shape with `x`.

     Examples:
+
         .. code-block:: python

-            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
-            droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
+
+            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+            droped = fluid.layers.dropout(x, dropout_prob=0.5)
     """
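A minimal NumPy sketch (not part of the commit) of the training-time behaviour described above: each unit is zeroed with probability `dropout_prob`, and the rest pass through unchanged:

.. code-block:: python

    import numpy as np

    def dropout_train(x, dropout_prob, rng):
        # Keep a unit with probability 1 - dropout_prob, else zero it.
        mask = rng.random(x.shape) >= dropout_prob
        return x * mask

    rng = np.random.default_rng(seed=1)
    x = rng.standard_normal((32, 32)).astype("float32")
    droped = dropout_train(x, dropout_prob=0.5, rng=rng)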
helper = LayerHelper('dropout', **locals())
@@ -2990,32 +3002,33 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

     .. math::
-        y = \frac{x}{ \sqrt{\sum {x^2} + epsion }}
+
+        y = \\frac{x}{ \sqrt{\sum {x^2} + epsion }}

     For `x` with more dimensions, this layer independently normalizes each 1-D
     slice along dimension `axis`.

     Args:
         x(Variable|list): The input tensor to l2_normalize layer.
-        axis(int): The axis on which to apply normalization. If `axis < 0`,
+        axis(int): The axis on which to apply normalization. If `axis < 0`, \
             the dimension to normalization is rank(X) + axis. -1 is the
             last dimension.
-        epsilon(float): The epsilon value is used to avoid division by zero,
+        epsilon(float): The epsilon value is used to avoid division by zero, \
             the defalut value is 1e-10.
-        name(str|None): A name for this layer(optional). If set None, the layer
+        name(str|None): A name for this layer(optional). If set None, the layer \
             will be named automatically.

-
     Returns:
-        Variable: The output tensor variable.
+        Variable: The output tensor variable is the same shape with `x`.

     Examples:
+
         .. code-block:: python

-            data = fluid.layers.data(name="data",
-                                     shape=(3, 17, 13),
-                                     dtype="float32")
-            normed = fluid.layers.l2_normalize(x=data, axis=1)
+            data = fluid.layers.data(name="data",
+                                     shape=(3, 17, 13),
+                                     dtype="float32")
+            normed = fluid.layers.l2_normalize(x=data, axis=1)
     """

     if len(x.shape) == 1:
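The math above in NumPy form, a sketch (not part of the commit) normalizing each 1-D slice along `axis`:

.. code-block:: python

    import numpy as np

    def l2_normalize(x, axis, epsilon=1e-12):
        # y = x / sqrt(sum(x^2) + epsilon), reduced along `axis`.
        norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True) + epsilon)
        return x / norm

    data = np.random.rand(3, 17, 13).astype("float32")
    normed = l2_normalize(data, axis=1)
    print(np.linalg.norm(normed[0, :, 0]))  # ~1.0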

python/paddle/fluid/layers/tensor.py

Lines changed: 18 additions & 2 deletions
@@ -497,11 +497,27 @@ def save_combine(x, file_path, overwrite=True):
     Saves a list of variables into a single file.

     Args:
-        x(list): A list of Tensor/LoDTensor to be saved together in a single file.
+        x(list): A list of Tensor/LoDTensor variables to be saved together in
+            a single file.
         file_path(str): The file path where variables will be saved.
-        overwrite(bool): Whether or not cover the given file when it has already
+        overwrite(bool): Whether or not cover the given file when it has already
             existed. If it's set 'False' and the file is existed, a runtime
             error will be thrown.
+
+    Returns:
+        There is no return value.
+
+    Examples:
+
+        .. code-block:: python

+            v1 = fluid.layers.data(name="data",
+                                   shape=(4, 6),
+                                   dtype="float32")
+            v2 = fluid.layers.data(name="data",
+                                   shape=(6, 8, 4),
+                                   dtype="float32")
+            normed = fluid.layers.save_combine([v1, v2], file_path="output")
     """
     helper = LayerHelper("save_combine", **locals())
     helper.append_op(
