
Commit c2ad381

Merge pull request #11560 from JiayiFeng/doc_non_layer_api
Doc of non layer api
2 parents: 0d2dd1a + 457d81b

File tree

3 files changed: +646 -112 lines changed

python/paddle/fluid/average.py

Lines changed: 19 additions & 0 deletions
@@ -36,6 +36,25 @@ def _is_number_or_matrix_(var):
 
 
 class WeightedAverage(object):
+    """
+    Calculate weighted average.
+
+    The average is calculated entirely in Python. It does not change
+    Paddle's Program, nor does it modify the NN model's configuration.
+    It is purely a wrapper around Python functions.
+
+    Examples:
+        .. code-block:: python
+
+            avg = fluid.average.WeightedAverage()
+            avg.add(value=2.0, weight=1)
+            avg.add(value=4.0, weight=2)
+            avg.eval()
+
+            # The result is 3.333333333.
+            # For (2.0 * 1 + 4.0 * 2) / (1 + 2) = 3.333333333
+    """
+
     def __init__(self):
         warnings.warn(
             "The %s is deprecated, please use fluid.metrics.Accuracy instead." %

python/paddle/fluid/backward.py

Lines changed: 59 additions & 12 deletions
@@ -147,15 +147,15 @@ def _addup_repetitive_outputs_(op_descs):
             else:
                 if len(renamed_vars[var_name]) == 1:
                     new_name = var_name + "@RENAME@" + \
-                        str(var_rename_count[var_name])
+                               str(var_rename_count[var_name])
                     var_rename_count[var_name] += 1
                     # rename original var_name
                     renamed_vars[var_name][0] = new_name
                     _rename_arg_(op_descs, var_name, new_name, 0, idx)
                     _rename_arg_(pending_sum_ops, var_name, new_name)
 
                 new_name = var_name + "@RENAME@" + \
-                    str(var_rename_count[var_name])
+                           str(var_rename_count[var_name])
                 var_rename_count[var_name] += 1
                 op_desc.rename_output(var_name, new_name)
                 renamed_vars[var_name].append(new_name)
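
The hunk above only re-indents the continuation lines of the @RENAME@ aliasing in _addup_repetitive_outputs_. The underlying scheme: when the same output variable is written by more than one operator, each write gets a fresh alias built from the variable name, an @RENAME@ tag, and a per-variable counter, so the aliases can later be summed back into the original variable. A sketch of just that naming scheme in isolation (the helper name next_renamed is hypothetical; the real function also rewrites operator inputs and outputs):

    from collections import defaultdict

    var_rename_count = defaultdict(int)  # per-variable rename counter

    def next_renamed(var_name):
        # Build a unique alias such as "fc_0.w@RENAME@0", "fc_0.w@RENAME@1", ...
        new_name = var_name + "@RENAME@" + str(var_rename_count[var_name])
        var_rename_count[var_name] += 1
        return new_name

    print(next_renamed("fc_0.w"))  # fc_0.w@RENAME@0
    print(next_renamed("fc_0.w"))  # fc_0.w@RENAME@1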
@@ -435,18 +435,65 @@ def _get_stop_gradients_(program):
 def append_backward(loss, parameter_list=None, no_grad_set=None,
                     callbacks=None):
     """
-    Append backward part to main_program
-
-    Args:
-        loss(Variable): The variable generated by cost function.
-        parameter_list(list[string]): Parameters that need to be updated by
-            optimizer. If None, it means all parameters need to be updated.
-        no_grad_set(set): Variables that have no gradients in Block 0.
-            All variables with `stop_gradient=True` from all blocks will be
-            automatically added.
-
-    Return:
-        (list[(Variable,Variable)]): list of (parameter, gradient) pair.
+    Append backward part to main_program.
+
+    A complete neural network training process is made up of forward and
+    backward propagation. However, when we configure a network, we only
+    need to specify its forward part. The backward part is generated
+    automatically from the forward part by this function.
+
+    In most cases, users do not need to invoke this function manually. It
+    will be automatically invoked by the optimizer's `minimize` function.
+
+    Args:
+        loss(Variable): The loss variable of the network.
+        parameter_list(list[string]|None): Names of the parameters that
+            need to be updated by optimizers. If it is None, all
+            parameters will be updated.
+            Default: None
+        no_grad_set(set|None): Variables in Block 0 whose gradients should
+            be ignored. All variables with `stop_gradient=True` from all
+            blocks will be automatically added into this set.
+            Default: None
+        callbacks(list[callable object]|None): Callbacks used for doing
+            some custom jobs during backward part building. Every callable
+            object in the list will be invoked once each time a new
+            gradient operator is added into the program. A callable object
+            must have two input parameters: 'block' and 'context'. The
+            'block' is the block to which the new gradient operator will
+            be added. The 'context' is a map whose keys are gradient
+            variable names and whose values are the corresponding original
+            variables. In addition, the 'context' has one more special
+            key-value pair: the key is the string '__current_op_desc__'
+            and the value is the op_desc of the gradient operator that has
+            just triggered the callable object.
+
+    Returns:
+        list[(Variable,Variable)]: Pairs of parameters and their
+            corresponding gradients. In each pair, the first element is
+            the parameter and the second is its gradient variable.
+
+    Raises:
+        AssertionError: If `loss` is not an instance of Variable.
+
+    Examples:
+        .. code-block:: python
+
+            # network configuration code
+            # ...
+            avg_loss = fluid.layers.mean(loss)
+            param_grad_list = fluid.backward.append_backward(loss=avg_loss)
     """
     assert isinstance(loss, framework.Variable)
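
The callback contract documented above (a callable taking 'block' and 'context', where 'context' maps gradient variable names to their original variables and carries the special '__current_op_desc__' key) can be sketched as follows; the logging body and surrounding network code are illustrative, not part of this commit:

    def log_grad_op(block, context):
        # 'context' maps gradient variable names to their original
        # variables; '__current_op_desc__' holds the op_desc of the
        # gradient operator that has just been appended to 'block'.
        op_desc = context['__current_op_desc__']
        print("added grad op '%s' to block %d" % (op_desc.type(), block.idx))

    # Hypothetical usage, continuing the docstring's example:
    # param_grad_list = fluid.backward.append_backward(
    #     loss=avg_loss, callbacks=[log_grad_op])

And since the return value is a list of (parameter, gradient) pairs, a caller can consume it directly, e.g.:

    # param_grad_list comes from the append_backward call above.
    for param, grad in param_grad_list:
        # Each pair holds a parameter Variable and its gradient Variable.
        print(param.name, "->", grad.name)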
