Commit b77c886
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into update-api-reference-1
2 parents 82a4cf1 + e6654c1

File tree

12 files changed: +517 -175 lines

paddle/fluid/operators/activation_op.cc

Lines changed: 1 addition & 1 deletion
@@ -443,7 +443,7 @@ class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 Swish Activation Operator.

-$$out = \frac{x}{1 + e^{- \beta x}}$$
+$$out = \\frac{x}{1 + e^{- \beta x}}$$

 )DOC");
   }
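The escaping fix above changes only how the formula renders in the generated docs; the math itself is untouched. A minimal NumPy sketch of the Swish formula, for reference (illustrative code, not part of the commit):

    import numpy as np

    def swish(x, beta=1.0):
        # out = x / (1 + exp(-beta * x)), per the docstring formula above
        return x / (1.0 + np.exp(-beta * x))

    print(swish(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.2689, 0.0, 0.7311]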

paddle/fluid/operators/clip_by_norm_op.cc

Lines changed: 10 additions & 1 deletion
@@ -54,10 +54,19 @@ be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as
 shown in the following formula:

 $$
-Out = \frac{max\_norm * X}{norm(X)},
+Out = \\frac{max\\_norm * X}{norm(X)},
 $$

 where $norm(X)$ represents the L2 norm of $X$.
+
+Examples:
+    .. code-block:: python
+
+        data = fluid.layers.data(
+            name='data', shape=[2, 4, 6], dtype='float32')
+        clipped = fluid.layers.clip_by_norm(
+            x=data, max_norm=0.5)
+
 )DOC");
   }
 };
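As a sanity check on the formula, a NumPy sketch of the clipping rule (a hedged illustration; it assumes, as is conventional for this operator, that scaling applies only when the L2 norm exceeds max_norm):

    import numpy as np

    def clip_by_norm(x, max_norm):
        # Rescale x so its L2 norm does not exceed max_norm.
        norm = np.linalg.norm(x)
        return max_norm * x / norm if norm > max_norm else x

    v = np.array([3.0, 4.0])     # L2 norm is 5.0
    print(clip_by_norm(v, 0.5))  # [0.3 0.4], L2 norm is now 0.5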

paddle/fluid/operators/pool_op.cc

Lines changed: 13 additions & 6 deletions
@@ -204,8 +204,6 @@ void Pool2dOpMaker::Make() {
   // TODO(dzhwinter): need to registered layout transform function

   AddComment(R"DOC(
-Pool2d Operator.
-
 The pooling2d operation calculates the output based on
 the input, pooling_type and ksize, strides, paddings parameters.
 Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
@@ -215,19 +213,28 @@ These two elements represent height and width, respectively.
 The input(X) size and output(Out) size may be different.

 Example:
+
   Input:
+
      X shape: $(N, C, H_{in}, W_{in})$
+
   Output:
+
      Out shape: $(N, C, H_{out}, W_{out})$
+
   For ceil_mode = false:
      $$
-      H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\
-      W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
+      H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
+      $$
+      $$
+      W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
      $$
   For ceil_mode = true:
      $$
-      H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\
-      W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
+      H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
+      $$
+      $$
+      W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
      $$

 )DOC");
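Splitting the two-line formulas into separate $$ blocks fixes their rendering; the arithmetic is unchanged. A small Python sketch of the output-size formulas, showing where ceil_mode matters (illustrative only):

    def pool2d_out_dim(in_dim, ksize, padding, stride, ceil_mode=False):
        # Output spatial size per the pool2d formulas above.
        if ceil_mode:
            return (in_dim - ksize + 2 * padding + stride - 1) // stride + 1
        return (in_dim - ksize + 2 * padding) // stride + 1

    # H_in = 6, ksize = 3, padding = 0, stride = 2:
    print(pool2d_out_dim(6, 3, 0, 2, ceil_mode=False))  # 2
    print(pool2d_out_dim(6, 3, 0, 2, ceil_mode=True))   # 3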

paddle/fluid/operators/uniform_random_batch_size_like_op.cc

Lines changed: 2 additions & 2 deletions
@@ -35,10 +35,10 @@ class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
  protected:
   void Apply() override {
     AddComment(R"DOC(
-Uniform random operator
+UniformRandomBatchSizeLike operator.

 This operator initializes a tensor with the same batch_size as the Input tensor
-with random values sampled from a uniform distribution.
+with random values sampled from a uniform distribution.

 )DOC");
     AddAttr<float>("min",
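The renamed comment describes the operator's semantics: keep the input's batch size, fill the rest of the shape with uniform samples. A NumPy sketch of that behavior (hypothetical helper for illustration; the real operator is configured through attributes such as min and max):

    import numpy as np

    def uniform_random_batch_size_like(x, shape, low=-1.0, high=1.0):
        # Copy x's batch size (dim 0); draw the remaining dims from U[low, high).
        out_shape = (x.shape[0],) + tuple(shape[1:])
        return np.random.uniform(low, high, size=out_shape)

    x = np.zeros((8, 100), dtype=np.float32)
    print(uniform_random_batch_size_like(x, shape=(-1, 5)).shape)  # (8, 5)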

python/paddle/fluid/framework.py

Lines changed: 39 additions & 2 deletions
@@ -1034,6 +1034,37 @@ def clone_variable(self, var):


 class Program(object):
+    """
+    Python Program. Beneath it is a ProgramDesc, which is used to
+    create the C++ Program. A Program is a self-contained, container-like
+    programming language. It has at least one Block; when control flow
+    ops such as conditional_block or while_op are included, the Program
+    will contain nested blocks.
+    Please refer to framework.proto for details.
+
+    Notes: there are a default_startup_program and a default_main_program
+    by default, and the pair shares parameters.
+    The default_startup_program runs only once to initialize parameters;
+    the default_main_program runs in every mini-batch and adjusts the weights.
+
+    Args:
+        None
+
+    Returns:
+        Python Program
+
+    Examples:
+        .. code-block:: python
+
+            main_program = Program()
+            startup_program = Program()
+            with fluid.program_guard(main_program=main_program, startup_program=startup_program):
+                x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
+                y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
+                out = fluid.layers.fc(input=x, size=10, act="relu")
+
+    """
+
     def __init__(self):
         self.desc = core.ProgramDesc()
         self.blocks = [Block(self, 0)]
@@ -1099,6 +1130,8 @@ def get_desc(self):

     def clone(self, for_test=False):
         """Clone the Program object
+        Args:
+            for_test(bool): indicate whether to clone for testing.

         Set for_test to False when we want to clone the program for training.
         Set for_test to True when we want to clone the program for testing.
@@ -1109,8 +1142,9 @@ def clone(self, for_test=False):
         the is_test attributes in these operators will be set to True for
         testing purposes, otherwise, they remain unchanged.

-        Returns(Program):
-            The cloned Program object.
+        Returns:
+            Program: The cloned Program object.
+
         """
         if for_test:
             p = self.inference_optimize()
@@ -1228,6 +1262,7 @@ def sync_with_cpp(self):
     def copy_param_info_from(self, other):
         """
         Copy the information of parameters from other program.
+
         Args:
             other(Program): Other program
@@ -1246,6 +1281,7 @@ def copy_param_info_from(self, other):
     def copy_data_info_from(self, other):
         """
         Copy the information of data variables from other program.
+
         Args:
             other(Program): Other program
@@ -1299,6 +1335,7 @@ def __str__(self):
     def to_string(self, throw_on_error, with_details=False):
         """
         To debug string.
+
         Args:
             throw_on_error(bool): raise exception when self is not initialized
                 when throw_on_error is True
python/paddle/fluid/layers/control_flow.py

Lines changed: 52 additions & 21 deletions
@@ -822,17 +822,25 @@ def max_sequence_len(rank_table):


 def lod_tensor_to_array(x, table):
-    """ Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.
+    """
+    Convert a LoDTensor to a LoDTensorArray.
+
+    This function splits a LoDTensor into a LoDTensorArray according to its LoD
+    information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
+    PaddlePaddle. The generated LoDTensorArray can be further read
+    or written by `read_from_array()` and `write_to_array()` operators. However,
+    this function is generally an internal component of PaddlePaddle's `DynamicRNN`.
+    Users should not use it directly.

     Args:
-        x (Variable|list): The LOD tensor to be converted to a LOD tensor array.
+        x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
-                                descending order.
+                                descending order. It is generally generated
+                                by the `layers.lod_rank_table()` API.

     Returns:
-        Variable: The variable of type array that has been converted from a
-                  tensor.
+        Variable: The LoDTensorArray that has been converted from the input tensor.

     Examples:
         .. code-block:: python
@@ -897,8 +905,7 @@ def increment(x, value=1.0, in_place=True):
         in_place (bool): If the increment should be performed in-place.

     Returns:
-        Variable: The tensor variable storing the transformation of
-                  element-wise increment of each value in the input.
+        Variable: The elementwise-incremented object.

     Examples:
         .. code-block:: python
@@ -940,7 +947,7 @@ def array_write(x, i, array=None):
         Variable: The output LOD_TENSOR_ARRAY where the input tensor is written.

     Examples:
-        .. code-block::python
+        .. code-block:: python

             tmp = fluid.layers.zeros(shape=[10], dtype='int32')
             i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
@@ -1054,14 +1061,31 @@ def equal(x, y, cond=None, **ignored):


 def array_read(array, i):
-    """This function performs the operation to read the data in as an
+    """
+    This function reads the data at the given index from the input
     LOD_TENSOR_ARRAY.
+
+    .. code-block:: text
+
+        Given:
+
+            array = [0.6, 0.1, 0.3, 0.1]
+
+        And:
+
+            i = 2
+
+        Then:
+
+            output = 0.3
+
     Args:
-        array (Variable|list): The input tensor that will be written to an array.
-        i (Variable|list): The subscript index in tensor array, that points the
-                           place where data will be written to.
+        array (Variable|list): The input tensor that stores data to be read.
+        i (Variable|list): The index of the data to be read from the input array.
+
     Returns:
         Variable: The tensor type variable that has the data written to it.
+
     Examples:
         .. code-block:: python
@@ -1154,6 +1178,13 @@ def array_length(array):


 class ConditionalBlockGuard(BlockGuard):
+    """
+    ConditionalBlockGuard is derived from BlockGuard. It is dedicated to
+    holding a ConditionalBlock and helping users enter and exit a
+    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
+    is generally an internal component of IfElse; users should not use it directly.
+    """
+
     def __init__(self, block):
         if not isinstance(block, ConditionalBlock):
             raise TypeError("block should be conditional block")
@@ -1875,26 +1906,26 @@ def reorder_lod_tensor_by_rank(x, rank_table):

 def is_empty(x, cond=None, **ignored):
     """
-    **Is Empty**
-
-    This layer returns the truth value of whether the variable is empty.
+    Test whether a Variable is empty.

     Args:
-        x(Variable): Operand of *is_empty*
-        cond(Variable|None): Optional output variable to store the result
-                             of *is_empty*
+        x (Variable): The Variable to be tested.
+        cond (Variable|None): Output parameter. Returns the test result
+                              of the given 'x'. Default: None

     Returns:
-        Variable: The tensor variable storing the output of *is_empty*.
+        Variable: A bool scalar. True if 'x' is an empty Variable.

     Raises:
         TypeError: If input cond is not a variable, or cond's dtype is
-                   not bool
+                   not bool.

     Examples:
         .. code-block:: python

-            less = fluid.layers.is_empty(x=input)
+            res = fluid.layers.is_empty(x=input)
+            # or:
+            fluid.layers.is_empty(x=input, cond=res)
     """
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
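Pairing the two array operators documented above, a short fluid sketch (it assumes the same API as the docstring examples in this diff):

    import paddle.fluid as fluid

    # Write a tensor into a LoDTensorArray at index i, then read it back.
    tmp = fluid.layers.fill_constant(shape=[10], dtype='int64', value=5)
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    arr = fluid.layers.array_write(tmp, i=i)  # creates the array when none is passed
    item = fluid.layers.array_read(array=arr, i=i)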

python/paddle/fluid/layers/io.py

Lines changed: 64 additions & 3 deletions
@@ -544,6 +544,41 @@ def shuffle(reader, buffer_size):


 def batch(reader, batch_size):
+    """
+    This layer is a reader decorator. It takes a reader and adds
+    'batching' decoration to it. When reading with the resulting
+    decorated reader, output data will be automatically organized
+    into batches.
+
+    Args:
+        reader(Variable): The reader to be decorated with 'batching'.
+        batch_size(int): The batch size.
+
+    Returns:
+        Variable: The reader which has been decorated with 'batching'.
+
+    Examples:
+        .. code-block:: python
+
+            raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
+                                                               './data2.recordio'],
+                                                    shapes=[(3, 224, 224), (1,)],
+                                                    lod_levels=[0, 0],
+                                                    dtypes=['float32', 'int64'],
+                                                    thread_num=2,
+                                                    buffer_size=2)
+            batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
+
+            # If we read data with the raw_reader:
+            #     data = fluid.layers.read_file(raw_reader)
+            # we can only get data instance by instance.
+            #
+            # However, if we read data with the batch_reader:
+            #     data = fluid.layers.read_file(batch_reader)
+            # every 5 adjacent instances will be automatically combined
+            # into a batch. So what we get ('data') is a batch instead
+            # of an instance.
+    """
     return __create_unshared_decorated_reader__(
         'create_batch_reader', reader, {'batch_size': int(batch_size)})

@@ -589,15 +624,41 @@ def parallel(reader):
             {})


-def read_file(file_obj):
+def read_file(reader):
+    """
+    Execute the given reader and get data via it.
+
+    A reader is also a Variable. It can be a raw reader generated by
+    `fluid.layers.open_files()` or a decorated one generated by
+    `fluid.layers.double_buffer()` and so on.
+
+    Args:
+        reader(Variable): The reader to execute.
+
+    Returns:
+        Tuple[Variable]: Data read via the given reader.
+
+    Examples:
+        .. code-block:: python
+
+            data_file = fluid.layers.open_files(
+                filenames=['mnist.recordio'],
+                shapes=[(-1, 748), (-1, 1)],
+                lod_levels=[0, 0],
+                dtypes=["float32", "int64"])
+            data_file = fluid.layers.double_buffer(
+                fluid.layers.batch(data_file, batch_size=64))
+            input, label = fluid.layers.read_file(data_file)
+    """
     helper = LayerHelper('read_file')
     out = [
         helper.create_tmp_variable(
             stop_gradient=True, dtype='float32')
-        for _ in range(len(file_obj.desc.shapes()))
+        for _ in range(len(reader.desc.shapes()))
     ]
     helper.append_op(
-        type='read', inputs={'Reader': [file_obj]}, outputs={'Out': out})
+        type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
     if len(out) == 1:
         return out[0]
     else:
