Commit 7838058

Fix api docs in RNN, Transformer, layer_norm, WeightNormParamAttr (#29235) (#29407)
* Fix api docs in RNN, Transformer, layer_norm, WeightNormParamAttr. test=develop
* Fix api doc for print in label_smooth. test=develop
* Update api docs according to review comments. Add name argument in RNN back. test=develop
1 parent 3a09672 commit 7838058

File tree

4 files changed: +32 -39 lines changed

python/paddle/fluid/layers/nn.py

Lines changed: 25 additions & 32 deletions
```diff
@@ -842,52 +842,52 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
 def crf_decoding(input, param_attr, label=None, length=None):
     """
     :api_attr: Static Graph
+
     ${comment}
 
     Args:
-        input(${emission_type}): ${emission_comment}
+        input(Tensor): ${emission_comment}
 
         param_attr (ParamAttr|None): To specify the weight parameter attribute.
             Default: None, which means the default weight parameter property is
-            used. See usage for details in :ref:`api_fluid_ParamAttr` .
+            used. See usage for details in :ref:`api_paddle_fluid_param_attr_ParamAttr` .
 
         label(${label_type}, optional): ${label_comment}
 
         length(${length_type}, optional): ${length_comment}
 
     Returns:
-        Variable: ${viterbi_path_comment}
+        Tensor: ${viterbi_path_comment}
 
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
             import paddle
             paddle.enable_static()
 
             # LoDTensor-based example
             num_labels = 10
-            feature = fluid.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
-            label = fluid.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
-            emission = fluid.layers.fc(input=feature, size=num_labels)
+            feature = paddle.static.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
+            label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
+            emission = paddle.static.nn.fc(feature, size=num_labels)
 
-            crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label,
-                      param_attr=fluid.ParamAttr(name="crfw"))
-            crf_decode = fluid.layers.crf_decoding(input=emission,
-                      param_attr=fluid.ParamAttr(name="crfw"))
+            crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label,
+                      param_attr=paddle.ParamAttr(name="crfw"))
+            crf_decode = paddle.static.nn.crf_decoding(input=emission,
+                      param_attr=paddle.ParamAttr(name="crfw"))
 
             # Common tensor example
             num_labels, max_len = 10, 20
-            feature = fluid.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
-            label = fluid.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
-            length = fluid.data(name='length', shape=[-1, 1], dtype='int64')
-            emission = fluid.layers.fc(input=feature, size=num_labels,
+            feature = paddle.static.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
+            label = paddle.static.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
+            length = paddle.static.data(name='length', shape=[-1, 1], dtype='int64')
+            emission = paddle.static.nn.fc(feature, size=num_labels,
                       num_flatten_dims=2)
 
-            crf_cost = fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
-                      param_attr=fluid.ParamAttr(name="crfw_pad"))
-            crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
-                      param_attr=fluid.ParamAttr(name="crfw_pad"))
+            crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
+                      param_attr=paddle.ParamAttr(name="crfw_pad"))
+            crf_decode = paddle.static.nn.crf_decoding(input=emission, length=length,
+                      param_attr=paddle.ParamAttr(name="crfw_pad"))
     """
     check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                              'crf_decoding')
```
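For readers who want to run the updated docstring example end to end, here is a minimal sketch of the padded-tensor variant with the executor boilerplate the docstring omits. The graph-building lines mirror the `+` lines above; the batch size and random feed data are illustrative assumptions.

```python
import numpy as np
import paddle

paddle.enable_static()

# Graph construction, mirroring the updated docstring example.
num_labels, max_len = 10, 20
feature = paddle.static.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
label = paddle.static.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
length = paddle.static.data(name='length', shape=[-1, 1], dtype='int64')
emission = paddle.static.nn.fc(feature, size=num_labels, num_flatten_dims=2)

crf_cost = paddle.fluid.layers.linear_chain_crf(
    input=emission, label=label, length=length,
    param_attr=paddle.ParamAttr(name="crfw_pad"))
crf_decode = paddle.static.nn.crf_decoding(
    input=emission, length=length,
    param_attr=paddle.ParamAttr(name="crfw_pad"))

# One forward pass on random data (batch size 4 is an arbitrary choice).
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
decoded, = exe.run(
    feed={
        'word_emb_pad': np.random.random((4, max_len, 784)).astype('float32'),
        'label_pad': np.random.randint(0, num_labels, (4, max_len, 1)).astype('int64'),
        'length': np.full((4, 1), max_len, dtype='int64'),
    },
    fetch_list=[crf_decode])
print(decoded.shape)  # decoded tag indices for each padded sequence
```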
```diff
@@ -3427,7 +3427,7 @@ def layer_norm(input,
     - :math:`b`: the trainable bias parameter.
 
     Args:
-        input(Variable): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
+        input(Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
         scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
             normalization. Default: True.
         shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
@@ -3452,24 +3452,17 @@ def layer_norm(input,
         name(str): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        Variable: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
+        Tensor: ``Tensor`` indicating the normalized result, the data type is the same as ``input`` , and the return dimension is the same as ``input`` .
 
     Examples:
 
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            import numpy as np
             import paddle
             paddle.enable_static()
-            x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
-            hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            np_x = np.random.random(size=(8, 3, 32, 32)).astype('float32')
-            output = exe.run(feed={"x": np_x}, fetch_list = [hidden1])
-            print(output)
+            x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
+            output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
+            print(output.shape)  # [8, 32, 32]
     """
     assert in_dygraph_mode(
     ) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
```
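The assert at the bottom of this hunk points dygraph users at `paddle.nn.LayerNorm`. A minimal dynamic-graph sketch of the equivalent call, normalizing over every axis after the batch axis to match `begin_norm_axis=1` (the input shape is an assumption):

```python
import paddle

x = paddle.rand([8, 32, 32], dtype='float32')
# normalized_shape covers all axes after the batch axis, which
# corresponds to begin_norm_axis=1 in the static-graph API.
layer_norm = paddle.nn.LayerNorm(normalized_shape=[32, 32])
out = layer_norm(x)
print(out.shape)  # [8, 32, 32]
```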
The visible text in the next hunk is unchanged; the edit is whitespace-only (trailing space removed from the ``NOTE`` line).

```diff
@@ -9736,7 +9729,7 @@ def prelu(x, mode, param_attr=None, name=None):
     if mode not in ['all', 'channel', 'element']:
         raise ValueError('mode should be one of all, channel, element.')
     alpha_shape = [1]
-    # NOTE(): The input of this API should be ``N,C,...`` format, 
+    # NOTE(): The input of this API should be ``N,C,...`` format,
     # which means x.shape[0] is batch_size and x.shape[0] is channel.
     if mode == 'channel':
         assert len(
```
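For context on the ``N,C,...`` note, a hedged sketch of `'channel'` mode, which learns one alpha per channel (the input shape is an assumption):

```python
import paddle

paddle.enable_static()
# NCHW input: batch dimension N first, then 3 channels (C);
# 'channel' mode learns one alpha per channel.
x = paddle.static.data(name='prelu_x', shape=[-1, 3, 32, 32], dtype='float32')
out = paddle.fluid.layers.prelu(x, mode='channel')
```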

python/paddle/fluid/param_attr.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -225,8 +225,8 @@ class WeightNormParamAttr(ParamAttr):
     Note:
         ``gradient_clip`` of ``ParamAttr`` HAS BEEN DEPRECATED since 2.0.
         Please use ``need_clip`` in ``ParamAttr`` to specify the clip scope.
-        There are three clipping strategies: :ref:`api_paddle_nn_GradientClipByGlobalNorm` ,
-        :ref:`api_fluid_clip_GradientClipByNorm` , :ref:`api_fluid_clip_GradientClipByValue` .
+        There are three clipping strategies: :ref:`api_paddle_nn_ClipGradByGlobalNorm` ,
+        :ref:`api_paddle_nn_ClipGradByNorm` , :ref:`api_paddle_nn_ClipGradByValue` .
 
 
     Args:
@@ -244,8 +244,8 @@ class WeightNormParamAttr(ParamAttr):
             optimizer is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
             Default 1.0.
         regularizer (WeightDecayRegularizer, optional): Regularization strategy. There are
-            two method: :ref:`api_paddle_fluid_regularizer_L1Decay` ,
-            :ref:`api_paddle_fluid_regularizer_L2DecayRegularizer`.
+            two method: :ref:`api_paddle_regularizer_L1Decay` ,
+            :ref:`api_paddle_regularizer_L2Decay`.
             If regularizer is also set in ``optimizer``
             (such as :ref:`api_paddle_optimizer_SGD` ), that regularizer setting in
             optimizer will be ignored. Default None, meaning there is no regularization.
```
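Since this hunk only renames cross-references, a combined usage sketch may help: it wires ``need_clip`` plus one of the three clip strategies, and the renamed ``paddle.regularizer.L2Decay``, into a static-graph layer. All hyperparameter values are illustrative assumptions.

```python
import paddle

paddle.enable_static()

data = paddle.static.data(name="data", shape=[-1, 32], dtype="float32")
# need_clip marks this parameter as in-scope for gradient clipping;
# the strategy itself is configured on the optimizer below.
weight_attr = paddle.static.WeightNormParamAttr(
    dim=None,
    name="weight_norm_param",
    learning_rate=1.0,
    regularizer=paddle.regularizer.L2Decay(0.1),
    need_clip=True)
out = paddle.static.nn.fc(x=data, size=10, weight_attr=weight_attr)

# One of the three strategies named in the Note above.
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
sgd = paddle.optimizer.SGD(learning_rate=0.01, grad_clip=clip)
```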

python/paddle/nn/functional/common.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1554,7 +1554,7 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
             paddle.disable_static()
             x = paddle.to_tensor(x_data, stop_gradient=False)
             output = paddle.nn.functional.label_smooth(x)
-            print(output.numpy())
+            print(output)
 
             #[[[0.03333334 0.93333334 0.03333334]
             #  [0.93333334 0.03333334 0.93333334]]]
```
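The hunk excerpts the docstring example; below is a self-contained version, where the ``x_data`` definition is an assumption reconstructed to reproduce the printed output. With the default ``epsilon=0.1`` over 3 classes, the smoothed value is ``(1 - 0.1) * y + 0.1 / 3``.

```python
import numpy as np
import paddle

# 1 -> 0.9 + 0.1/3 ≈ 0.9333 and 0 -> 0.1/3 ≈ 0.0333.
x_data = np.array([[[0, 1, 0],
                    [1, 0, 1]]]).astype("float32")
paddle.disable_static()
x = paddle.to_tensor(x_data, stop_gradient=False)
output = paddle.nn.functional.label_smooth(x)
print(output)
# [[[0.03333334 0.93333334 0.03333334]
#   [0.93333334 0.03333334 0.93333334]]]
```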

python/paddle/nn/layer/transformer.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -643,7 +643,7 @@ class TransformerDecoderLayer(Layer):
             for linear in FFN. Otherwise, the three sub-layers all uses it as
             `weight_attr` to create parameters. Default: None, which means the
             default weight parameter property is used. See usage for details
-            in :ref:`api_fluid_ParamAttr` .
+            in :ref:`api_paddle_fluid_param_attr_ParamAttr` .
         bias_attr (ParamAttr|tuple|bool, optional): To specify the bias parameter property.
             If it is a tuple, `bias_attr[0]` would be used as `bias_attr` for
             self attention, `bias_attr[1]` would be used as `bias_attr` for
@@ -1199,7 +1199,7 @@ def generate_square_subsequent_mask(self, length):
             transformer_paddle = Transformer(
                 d_model, n_head, dim_feedforward=dim_feedforward)
             mask = transformer_paddle.generate_square_subsequent_mask(length)
-            print(mask.numpy())
+            print(mask)
 
             # [[ 0. -inf -inf -inf -inf]
             #  [ 0.  0. -inf -inf -inf]
```
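The docstring defines the hyperparameters above the excerpted lines; a self-contained version with assumed values (``d_model=128``, ``n_head=2``, ``dim_feedforward=512``, ``length=5``):

```python
import paddle
from paddle.nn import Transformer

# Assumed hyperparameters for illustration; d_model must divide by n_head.
d_model, n_head, dim_feedforward, length = 128, 2, 512, 5

transformer_paddle = Transformer(
    d_model, n_head, dim_feedforward=dim_feedforward)
# Causal mask: position i may only attend to positions <= i.
mask = transformer_paddle.generate_square_subsequent_mask(length)
print(mask)
# [[ 0. -inf -inf -inf -inf]
#  [ 0.   0. -inf -inf -inf]
#  [ 0.   0.   0. -inf -inf]
#  [ 0.   0.   0.   0. -inf]
#  [ 0.   0.   0.   0.   0.]]
```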
