Commit cbc1b7f

Polish documentation
1 parent 674327a commit cbc1b7f

8 files changed, +144 -64 lines


paddle/fluid/operators/activation_op.cc

Lines changed: 6 additions & 7 deletions
@@ -275,7 +275,7 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
              "The value of threshold for HardShrink. [default: 0.5]")
         .SetDefault(0.5f);
     AddComment(R"DOC(
-HardShrink Activation Operator.
+** HardShrink activation operator **
 
 .. math::
     out = \begin{cases}
@@ -399,13 +399,12 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 ThresholdedRelu Activation Operator.
 
-$$
-out = \begin{cases}
-    x, \text{if } x > threshold \\
-    0, \text{otherwise}
-    \end{cases}
-$$
+.. math::
 
+    out = \begin{cases}
+    x, \text{if } x > threshold \\
+    0, \text{otherwise}
+    \end{cases}
 )DOC");
   }
 };
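For reference, the two formulas touched in this file are easy to sanity-check numerically. Below is a minimal NumPy sketch of the HardShrink and ThresholdedRelu rules; the 0.5 HardShrink default comes from the hunk above, while the HardShrink piecewise cases (truncated in the hunk) and the 1.0 ThresholdedRelu default are conventional assumptions used only for illustration.

    import numpy as np

    def hard_shrink(x, threshold=0.5):
        # out = x when |x| > threshold, else 0 (HardShrink DOC block above).
        return np.where(np.abs(x) > threshold, x, 0.0)

    def thresholded_relu(x, threshold=1.0):  # default threshold assumed here
        # out = x when x > threshold, else 0 (ThresholdedRelu DOC block above).
        return np.where(x > threshold, x, 0.0)

    x = np.array([-2.0, -0.3, 0.2, 0.7, 1.5])
    print(hard_shrink(x))       # [-2.   0.   0.   0.7  1.5]
    print(thresholded_relu(x))  # [0.  0.  0.  0.  1.5]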

paddle/fluid/operators/compare_op.cc

Lines changed: 5 additions & 6 deletions
@@ -34,16 +34,15 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(true);
     AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s",
                                      comment.equation));
-    AddComment(string::Sprintf(R"DOC(%s Operator
-
+    AddComment(string::Sprintf(R"DOC(
 It operates element-wise on X and Y, and returns the Out. Each of them is a
 N-dim tensor. X and Y could be any type. The each element of the Out tensor is
 calculated by $%s$
 )DOC",
-                               comment.type, comment.equation));
-    AddAttr<int>("axis",
-                 "(int, default -1). The start dimension index "
-                 "for broadcasting Y onto X.")
+                               comment.equation));
+    AddAttr<int>(
+        "axis",
+        "The start dimension index for broadcasting Y onto X. [default -1]")
         .SetDefault(-1)
         .EqualGreaterThan(-1);
   }
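The reworded `axis` attribute describes where the smaller tensor Y is aligned against X before the element-wise comparison, with -1 meaning trailing alignment. A NumPy sketch of that broadcasting rule (semantics only, not Paddle code; the helper name is made up):

    import numpy as np

    def compare_less_than(x, y, axis=-1):
        # Align y with x starting at `axis` (trailing alignment when -1),
        # then compare element-wise; the result is a bool tensor shaped like x.
        if axis == -1:
            axis = x.ndim - y.ndim
        shape = [1] * axis + list(y.shape) + [1] * (x.ndim - axis - y.ndim)
        return x < y.reshape(shape)

    x = np.random.rand(2, 3, 4)
    y = np.random.rand(3)                         # broadcast onto axis 1 of x
    print(compare_less_than(x, y, axis=1).shape)  # (2, 3, 4), dtype=bool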

paddle/fluid/operators/multiplex_op.cc

Lines changed: 3 additions & 1 deletion
@@ -96,7 +96,9 @@ the (Ids[i])-th tensor.
 
 For i-th row of the output tensor:
 
-$ y[i] = x_{k}[i] $
+$$
+y[i] = x_{k}[i]
+$$
 
 where $y$ is the output tensor, $x_{k}$ is the k-th input tensor,
 and $k = Ids[i]$.
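The new display equation says the i-th output row is copied from row i of the input tensor selected by Ids[i]. A small NumPy sketch of that rule, purely for illustration:

    import numpy as np

    def multiplex(inputs, ids):
        # out[i] = inputs[ids[i]][i]  -- row-wise selection among candidate tensors.
        out = np.empty_like(inputs[0])
        for i, k in enumerate(ids):
            out[i] = inputs[k][i]
        return out

    x0 = np.array([[1, 1], [2, 2], [3, 3]])
    x1 = np.array([[10, 10], [20, 20], [30, 30]])
    print(multiplex([x0, x1], ids=[1, 0, 1]))
    # [[10 10]
    #  [ 2  2]
    #  [30 30]]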

paddle/fluid/operators/row_conv_op.cc

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker {
          "in this LodTensor is a matrix with shape T x N, i.e., the "
          "same shape as X.");
     AddComment(R"DOC(
-Row-convolution Operator.
+** Row-convolution operator **
 
 The row convolution is called lookahead convolution. This operator was
 introduced in the following paper for DeepSpeech2:
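The lookahead (row) convolution mentioned here mixes each time step with a few future steps using per-feature weights. A rough NumPy sketch, assuming the usual DeepSpeech2 form out[t] = sum_j W[j] * X[t + j] with zero padding past the end; the real operator also handles LoD sequence boundaries, which this ignores:

    import numpy as np

    def row_conv(x, w):
        # x: [T, N] sequence, w: [context, N] future-context weights.
        T, _ = x.shape
        context = w.shape[0]
        out = np.zeros_like(x)
        for t in range(T):
            for j in range(context):
                if t + j < T:            # zero-pad beyond the sequence end
                    out[t] += w[j] * x[t + j]
        return out

    x = np.random.rand(5, 3)
    w = np.random.rand(2, 3)             # current step plus one lookahead step
    print(row_conv(x, w).shape)          # (5, 3)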

python/paddle/fluid/layers/control_flow.py

Lines changed: 25 additions & 6 deletions
@@ -1008,8 +1008,28 @@ def array_read(array, i):
 
 def shrink_memory(x, i, table):
     """
-    This function creates an operator to shrink_rnn_memory using the RankTable
+    This function creates an operator to shrink rnn memory using the RankTable
     as mentioned in the input parameter.
+
+    NOTE: This API is very low-level API. It is used by DynamicRNN only.
+
+    Since the Dynamic RNN uses no-padding way to implement RNN. The sequence
+    will be sorted by order, and the length of valid memory will be shrink after
+    each time step.
+
+    Args:
+        x(Variable): The memory object in the previous time step.
+        i(Variable): The step count variable. A int scalar as LoDTensor.
+        table(Variable): The RNNRankTable object.
+
+    Returns:
+        the memory variable after shrink.
+
+    Examples:
+
+        Since this API is very low level API. The example is not provided.
+        Please reference the implementation of class DynamicRNN for detail
+        usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
     out = helper.create_tmp_variable(dtype=x.dtype)
@@ -1316,10 +1336,9 @@ def __call__(self):
 
 class DynamicRNN(object):
     """
-    Dynamic RNN.
-
-    This RNN can process a batch of sequence data. The length of each sample
-    sequence can be different. This API automatically process them in batch.
+    The dynamic RNN can process a batch of sequence data. The length of each
+    sample sequence can be different. This API automatically process them in
+    batch.
 
     The input lod must be set. Please reference `lod_tensor`
@@ -1500,7 +1519,7 @@ def memory(self,
                need_reorder=False,
                dtype='float32'):
         """
-        Create a memory variable.
+        Create a memory variable for dynamic rnn.
 
         If the :code:`init` is not None, :code:`memory` will be initialized by
         this variable. The :code:`need_reorder` is used to reorder the memory as
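Since the new shrink_memory docstring points readers at DynamicRNN for real usage, here is a rough sketch of how a DynamicRNN block is typically wired together; the sizes and layer choices (embedding, fc, sequence_last_step) are illustrative only, and the method names follow the fluid API as documented elsewhere in this release:

    import paddle.fluid as fluid

    # Variable-length word ids; LoD level 1 marks the sentence boundaries.
    words = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)
    emb = fluid.layers.embedding(input=words, size=[10000, 32])

    drnn = fluid.layers.DynamicRNN()
    with drnn.block():
        step = drnn.step_input(emb)                 # one time step per sequence
        prev = drnn.memory(shape=[32], value=0.0)   # valid memory shrinks as short sequences finish
        hidden = fluid.layers.fc(input=[step, prev], size=32, act='tanh')
        drnn.update_memory(ex_mem=prev, new_mem=hidden)
        drnn.output(hidden)

    last = fluid.layers.sequence_last_step(drnn())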

python/paddle/fluid/layers/detection.py

Lines changed: 48 additions & 33 deletions
@@ -210,53 +210,68 @@ def bipartite_match(dist_matrix,
                     dist_threshold=None,
                     name=None):
     """
-    **Bipartite matchint operator**
-
-    This operator is a greedy bipartite matching algorithm, which is used to
-    obtain the matching with the maximum distance based on the input
+    This operator implements a greedy bipartite matching algorithm, which is
+    used to obtain the matching with the maximum distance based on the input
     distance matrix. For input 2D matrix, the bipartite matching algorithm can
-    find the matched column for each row, also can find the matched row for
-    each column. And this operator only calculate matched indices from column
-    to row. For each instance, the number of matched indices is the number of
-    of columns of the input ditance matrix.
-
-    There are two outputs to save matched indices and distance.
-    A simple description, this algothrim matched the best (maximum distance)
+    find the matched column for each row (matched means the largest distance),
+    also can find the matched row for each column. And this operator only
+    calculate matched indices from column to row. For each instance,
+    the number of matched indices is the column number of the input distance
+    matrix.
+
+    There are two outputs, matched indices and distance.
+    A simple description, this algorithm matched the best (maximum distance)
     row entity to the column entity and the matched indices are not duplicated
     in each row of ColToRowMatchIndices. If the column entity is not matched
    any row entity, set -1 in ColToRowMatchIndices.
 
-    Please note that the input DistMat can be LoDTensor (with LoD) or Tensor.
+    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
     If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
     If Tensor, the height of ColToRowMatchIndices is 1.
 
+    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
+    layer. Please consider to use :code:`ssd_loss` instead.
+
     Args:
         dist_matrix(Variable): This input is a 2-D LoDTensor with shape
             [K, M]. It is pair-wise distance matrix between the entities
             represented by each row and each column. For example, assumed one
             entity is A with shape [K], another entity is B with shape [M]. The
-            dist_matirx[i][j] is the distance between A[i] and B[j]. The bigger
-            the distance is, the better macthing the pairs are. Please note,
-            This tensor can contain LoD information to represent a batch of
-            inputs. One instance of this batch can contain different numbers of
-            entities.
+            dist_matrix[i][j] is the distance between A[i] and B[j]. The bigger
+            the distance is, the better matching the pairs are.
+
+            NOTE: This tensor can contain LoD information to represent a batch
+            of inputs. One instance of this batch can contain different numbers
+            of entities.
         match_type(string|None): The type of matching method, should be
-            'bipartite' or 'per_prediction', 'bipartite' by defalut.
+            'bipartite' or 'per_prediction'. [default 'bipartite'].
         dist_threshold(float|None): If `match_type` is 'per_prediction',
             this threshold is to determine the extra matching bboxes based
-            on the maximum distance, 0.5 by defalut.
+            on the maximum distance, 0.5 by default.
     Returns:
-        match_indices(Variable): A 2-D Tensor with shape [N, M] in int type.
-            N is the batch size. If match_indices[i][j] is -1, it
-            means B[j] does not match any entity in i-th instance.
-            Otherwise, it means B[j] is matched to row
-            match_indices[i][j] in i-th instance. The row number of
-            i-th instance is saved in match_indices[i][j].
-        match_distance(Variable): A 2-D Tensor with shape [N, M] in float type.
-            N is batch size. If match_indices[i][j] is -1,
-            match_distance[i][j] is also -1.0. Otherwise, assumed
-            match_distance[i][j] = d, and the row offsets of each instance
-            are called LoD. Then match_distance[i][j] = dist_matrix[d+LoD[i]][j].
+        tuple: a tuple with two elements is returned. The first is
+        matched_indices, the second is matched_distance.
+
+        The matched_indices is a 2-D Tensor with shape [N, M] in int type.
+        N is the batch size. If match_indices[i][j] is -1, it
+        means B[j] does not match any entity in i-th instance.
+        Otherwise, it means B[j] is matched to row
+        match_indices[i][j] in i-th instance. The row number of
+        i-th instance is saved in match_indices[i][j].
+
+        The matched_distance is a 2-D Tensor with shape [N, M] in float type
+        . N is batch size. If match_indices[i][j] is -1,
+        match_distance[i][j] is also -1.0. Otherwise, assumed
+        match_distance[i][j] = d, and the row offsets of each instance
+        are called LoD. Then match_distance[i][j] =
+        dist_matrix[d+LoD[i]][j].
+
+    Examples:
+
+        >>> x = fluid.layers.data(name='x', shape=[4], dtype='float32')
+        >>> y = fluid.layers.data(name='y', shape=[4], dtype='float32')
+        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
+        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
     """
     helper = LayerHelper('bipartite_match', **locals())
     match_indices = helper.create_tmp_variable(dtype='int32')
@@ -364,15 +379,15 @@ def ssd_loss(location,
              normalize=True,
              sample_size=None):
     """
-    **Multi-box loss layer for object dection algorithm of SSD**
+    **Multi-box loss layer for object detection algorithm of SSD**
 
     This layer is to compute dection loss for SSD given the location offset
     predictions, confidence predictions, prior boxes and ground-truth boudding
     boxes and labels, and the type of hard example mining. The returned loss
     is a weighted sum of the localization loss (or regression loss) and
     confidence loss (or classification loss) by performing the following steps:
 
-    1. Find matched boundding box by bipartite matching algorithm.
+    1. Find matched bounding box by bipartite matching algorithm.
 
       1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
 
@@ -435,7 +450,7 @@ def ssd_loss(location,
         mining_type (str): The hard example mining type, should be 'hard_example'
             or 'max_negative', now only support `max_negative`.
         normalize (bool): Whether to normalize the SSD loss by the total number
-            of output locations, True by defalut.
+            of output locations, True by default.
         sample_size (int): The max sample size of negative box, used only when
             mining_type is 'hard_example'.
 
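The greedy matching that the rewritten bipartite_match docstring describes (repeatedly take the globally largest remaining distance, pair that row and column, and never reuse either) can be sketched for a single instance in NumPy. This is an illustration of the documented behaviour with non-negative distances such as IOU, not the operator's actual implementation:

    import numpy as np

    def greedy_bipartite_match(dist):
        # dist: [K, M] pairwise distances; larger means a better match.
        K, M = dist.shape
        col_to_row = np.full(M, -1, dtype=np.int64)   # -1: column matched no row
        col_dist = np.full(M, -1.0)
        d = dist.copy()
        for _ in range(min(K, M)):
            r, c = np.unravel_index(np.argmax(d), d.shape)
            if d[r, c] < 0:                           # nothing left to match
                break
            col_to_row[c] = r
            col_dist[c] = dist[r, c]
            d[r, :] = -1.0                            # each row and column is used once
            d[:, c] = -1.0
        return col_to_row, col_dist

    dist = np.array([[0.1, 0.9, 0.3],
                     [0.8, 0.2, 0.4]])
    print(greedy_bipartite_match(dist))
    # (array([ 1,  0, -1]), array([ 0.8,  0.9, -1. ]))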

python/paddle/fluid/layers/io.py

Lines changed: 34 additions & 9 deletions
@@ -302,15 +302,6 @@ def open_recordio_file(filename,
     """
     ${comment}
 
-    >>> import paddle.fluid as fluid
-    >>> reader = fluid.layers.io.open_recordio_file(
-    >>>                          filename='./data.recordio',
-    >>>                          shapes=[(3,224,224), (1)],
-    >>>                          lod_levels=[0, 0],
-    >>>                          dtypes=['float32', 'int64'])
-    >>> # Via the reader, we can use 'read_file' layer to get data:
-    >>> image, label = fluid.layers.io.read_file(reader)
-
     Args:
        filename(${filename_type}): ${filename_comment}.
        shapes(list): List of tuples which declaring data shapes.
@@ -322,6 +313,17 @@ def open_recordio_file(filename,
 
     Returns:
        ${out_comment}.
+
+    Examples:
+
+        >>> import paddle.fluid as fluid
+        >>> reader = fluid.layers.io.open_recordio_file(
+        >>>                          filename='./data.recordio',
+        >>>                          shapes=[(3,224,224), (1)],
+        >>>                          lod_levels=[0, 0],
+        >>>                          dtypes=['float32', 'int64'])
+        >>> # Via the reader, we can use 'read_file' layer to get data:
+        >>> image, label = fluid.layers.io.read_file(reader)
     """
     dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
     shape_concat = []
@@ -549,6 +551,29 @@ def batch(reader, batch_size):
 
 
 def double_buffer(reader, place=None, name=None):
+    """
+    Wrap a double buffer reader. The data will copy to target place with a
+    double buffer queue. If the target place is None, the place that executor
+    perform on will be used.
+
+    Args:
+        reader(Variable): the reader variable need to be wrapped.
+        place(Place): the place of target data. Default is the sample place of
+            executor perform.
+
+        name(str): Variable name. None if the user does not care.
+
+    Returns:
+        wrapped reader with double buffer.
+
+    Examples:
+
+        >>> reader = fluid.layers.open_files(filenames=['somefile'],
+        >>>                                  shapes=[[-1, 784], [-1, 1]],
+        >>>                                  dtypes=['float32', 'int64'])
+        >>> reader = fluid.layers.double_buffer(reader)
+        >>> img, label = fluid.layers.read_file(reader)
+    """
     attrs = dict()
     if place is not None:
         attrs['place'] = str(place).upper()
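Taken together, the two io.py docstrings describe one pipeline: open a RecordIO reader, wrap it with double_buffer so host-to-device copies overlap with computation, then pull tensors out with read_file. A combined sketch, reusing the shapes from the examples above (with the one-element shape written as a proper tuple) and assuming ./data.recordio exists:

    import paddle.fluid as fluid

    reader = fluid.layers.io.open_recordio_file(
        filename='./data.recordio',
        shapes=[(3, 224, 224), (1,)],
        lod_levels=[0, 0],
        dtypes=['float32', 'int64'])
    # Double buffering keeps the next batch staged while the current one runs.
    reader = fluid.layers.double_buffer(reader)
    image, label = fluid.layers.read_file(reader)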

python/paddle/fluid/layers/ops.py

Lines changed: 22 additions & 1 deletion
@@ -66,7 +66,6 @@
     'uniform_random_batch_size_like',
     'gaussian_random',
     'gaussian_random_batch_size_like',
-    'cumsum',
     'scatter',
     'sum',
     'slice',
@@ -120,3 +119,25 @@ def hard_shrink(x, threshold=None):
     >>> data = fluid.layers.data(name="input", shape=[784])
     >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
     """
+
+__all__ += ['cumsum']
+
+_cum_sum_ = generate_layer_fn('cumsum')
+
+
+def cumsum(x, axis=None, exclusive=None, reverse=None):
+    kwargs = dict()
+    for name in locals():
+        val = locals()[name]
+        if val is not None:
+            kwargs[name] = val
+
+    return _cum_sum_(**kwargs)
+
+
+cumsum.__doc__ = _cum_sum_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[32, 784])
+    >>> result = fluid.layers.cumsum(data, axis=0)
+"""
