Skip to content

Commit 08f927d

Browse files
tink2123 authored and
shanyi15 committed
cherry-pick API reference for release1.2 (#14750)
* Add examples to some functions. (#14645)
* Fix comments of ctc_greedy_decoder. (#14679) test=develop
* fix api format and examples test=develop
* Update executor.py test=develop
* Update nn.py
* Update nn.py test=develop
* Update nn.py test=develop
* Update clip.py test=release1.2
1 parent 8feb99b commit 08f927d

File tree

9 files changed

+206
-127
lines changed

9 files changed

+206
-127
lines changed

python/paddle/fluid/clip.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -134,12 +134,12 @@ class GradientClipByValue(BaseGradientClipAttr):
134134
Examples:
135135
.. code-block:: python
136136
137-
w_param_attrs = ParamAttr(name=None,
138-
initializer=UniformInitializer(low=-1.0, high=1.0, seed=0),
137+
w_param_attrs = fluid.ParamAttr(name=None,
138+
initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
139139
learning_rate=1.0,
140-
regularizer=L1Decay(1.0),
140+
regularizer=fluid.regularizer.L1Decay(1.0),
141141
trainable=True,
142-
clip=GradientClipByValue(-1.0, 1.0))
142+
clip=fluid.clip.GradientClipByValue(-1.0, 1.0))
143143
y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs)
144144
"""
145145

@@ -185,12 +185,12 @@ class GradientClipByNorm(BaseGradientClipAttr):
185185
Examples:
186186
.. code-block:: python
187187
188-
w_param_attrs = ParamAttr(name=None,
189-
initializer=UniformInitializer(low=-1.0, high=1.0, seed=0),
188+
w_param_attrs = fluid.ParamAttr(name=None,
189+
initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
190190
learning_rate=1.0,
191-
regularizer=L1Decay(1.0),
191+
regularizer=fluid.regularizer.L1Decay(1.0),
192192
trainable=True,
193-
clip=GradientClipByNorm(clip_norm=2.0))
193+
clip=fluid.clip.GradientClipByNorm(clip_norm=2.0))
194194
y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs)
195195
196196
"""

python/paddle/fluid/executor.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
from .framework import Program, default_main_program, Variable
2121
from . import core
2222

23-
__all__ = ['Executor', 'global_scope', 'scope_guard', '_switch_scope']
23+
__all__ = ['Executor', 'global_scope', 'scope_guard']
2424

2525
g_scope = core.Scope()
2626

@@ -407,16 +407,17 @@ def run(self,
407407
408408
Examples:
409409
410-
>>> data = layers.data(name='X', shape=[1], dtype='float32')
411-
>>> hidden = layers.fc(input=data, size=10)
412-
>>> layers.assign(hidden, out)
413-
>>> loss = layers.mean(out)
410+
>>> data = fluid.layers.data(name='X', shape=[1], dtype='float32')
411+
>>> out = fluid.layers.create_tensor(dtype='float32')
412+
>>> hidden = fluid.layers.fc(input=data, size=10)
413+
>>> fluid.layers.assign(hidden,out)
414+
>>> loss = fluid.layers.mean(out)
414415
>>> adam = fluid.optimizer.Adam()
415-
>>> adam.minimize(loss)
416+
>>> adam.minimize(loss)
416417
417418
>>> cpu = core.CPUPlace()
418-
>>> exe = Executor(cpu)
419-
>>> exe.run(default_startup_program())
419+
>>> exe = fluid.Executor(cpu)
420+
>>> exe.run(fluid.default_startup_program())
420421
421422
>>> x = numpy.random.random(size=(10, 1)).astype('float32')
422423
>>> outs = exe.run(

python/paddle/fluid/framework.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,12 +89,13 @@ def name_scope(prefix=None):
8989
9090
Examples:
9191
.. code-block:: python
92+
9293
with name_scope("encoder"):
9394
...
9495
with name_scope("decoder"):
9596
...
96-
with name_scope("attention"):
97-
...
97+
with name_scope("attention"):
98+
...
9899
"""
99100
# TODO(panyx0718): Only [0-9a-z].
100101
assert prefix, "namescope prefix cannot be empty."

python/paddle/fluid/layers/io.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -943,7 +943,18 @@ def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
943943

944944
def shuffle(reader, buffer_size):
945945
"""
946-
Shuffle the reader.
946+
Creates a data reader whose data output is shuffled.
947+
Output from the iterator that created by original reader will be
948+
buffered into shuffle buffer, and then shuffled. The size of shuffle buffer
949+
is determined by argument buf_size.
950+
951+
Args:
952+
param reader: the original reader whose output will be shuffled.
953+
type reader: callable
954+
param buf_size: shuffle buffer size.
955+
type buf_size: int
956+
return: the new reader whose output is shuffled.
957+
rtype: callable
947958
"""
948959
return __create_unshared_decorated_reader__(
949960
'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})

python/paddle/fluid/layers/learning_rate_scheduler.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -308,13 +308,9 @@ def piecewise_decay(boundaries, values):
308308

309309

310310
def append_LARS(params_grads, learning_rate, weight_decay):
311-
"""Applies LARS (LAYER-WISE ADAPTIVE RATE SCALING) to learning rate for
312-
each layer.
313-
314-
```python
315-
learning_rate *= local_gw_ratio * sqrt(sumsq(param))
316-
/ (sqrt(sumsq(gradient))+ weight_decay * sqrt(sumsq(param)))
317-
```
311+
"""
312+
Applies LARS (LAYER-WISE ADAPTIVE RATE SCALING) to learning rate for
313+
each layer.
318314
319315
Args:
320316
learning_rate: A learning rate Variable. This
@@ -323,6 +319,11 @@ def append_LARS(params_grads, learning_rate, weight_decay):
323319
324320
Returns:
325321
The decayed learning rate
322+
Examples:
323+
.. code-block:: python
324+
325+
learning_rate *= local_gw_ratio * sqrt(sumsq(param))
326+
/ (sqrt(sumsq(gradient))+ weight_decay * sqrt(sumsq(param)))
326327
"""
327328

328329
def _balanced_weight(param_norm, grad_norm):

0 commit comments

Comments
 (0)