
Commit 818de88

Author: Feiyu Chan
fix multiple documentation errors, test=document_fix (#29210) (#29384)

* fix multiple documentation error, test=document_fix
* fix more rst syntax errors, test=document_fix
* fix format issues in docstring, test=document_fix
1 parent b6bc4cb commit 818de88

3 files changed: +146, -317 lines changed


python/paddle/nn/functional/extension.py

Lines changed: 46 additions & 55 deletions
@@ -27,9 +27,6 @@

 def diag_embed(input, offset=0, dim1=-2, dim2=-1):
     """
-    :alias_main: paddle.nn.functional.diag_embed
-    :alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed
-
     This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
     are filled by ``input``. By default, a 2D plane formed by the last two dimensions
     of the returned tensor will be selected.
@@ -41,60 +38,59 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
         - If offset < 0, it is below the main diagonal.

     Args:
-        input(Variable|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
+        input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
         offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
         dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
         dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.

     Returns:
-        Variable, the output data type is the same as input data type.
+        Tensor, the output data type is the same as input data type.

     Examples:
         .. code-block:: python

             import paddle.nn.functional as F
-            import paddle.fluid.dygraph as dg
             import numpy as np

             diag_embed = np.random.randn(2, 3).astype('float32')
             # [[ 0.7545889 , -0.25074545, 0.5929117 ],
             # [-0.6097662 , -0.01753256, 0.619769 ]]
-            with dg.guard():
-                data1 = F.diag_embed(diag_embed)
-                data1.numpy()
-                # [[[ 0.7545889 , 0. , 0. ],
-                # [ 0. , -0.25074545, 0. ],
-                # [ 0. , 0. , 0.5929117 ]],
-
-                # [[-0.6097662 , 0. , 0. ],
-                # [ 0. , -0.01753256, 0. ],
-                # [ 0. , 0. , 0.619769 ]]]
-
-                data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
-                data2.numpy()
-                # [[[ 0. , 0. , 0. , 0. ],
-                # [ 0.7545889 , 0. , 0. , 0. ],
-                # [ 0. , -0.25074545, 0. , 0. ],
-                # [ 0. , 0. , 0.5929117 , 0. ]],
-                #
-                # [[ 0. , 0. , 0. , 0. ],
-                # [-0.6097662 , 0. , 0. , 0. ],
-                # [ 0. , -0.01753256, 0. , 0. ],
-                # [ 0. , 0. , 0.619769 , 0. ]]]
-
-                data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
-                data3.numpy()
-                # [[[ 0. , 0.7545889 , 0. , 0. ],
-                # [ 0. , -0.6097662 , 0. , 0. ]],
-                #
-                # [[ 0. , 0. , -0.25074545, 0. ],
-                # [ 0. , 0. , -0.01753256, 0. ]],
-                #
-                # [[ 0. , 0. , 0. , 0.5929117 ],
-                # [ 0. , 0. , 0. , 0.619769 ]],
-                #
-                # [[ 0. , 0. , 0. , 0. ],
-                # [ 0. , 0. , 0. , 0. ]]]
+
+            data1 = F.diag_embed(diag_embed)
+            data1.numpy()
+            # [[[ 0.7545889 , 0. , 0. ],
+            # [ 0. , -0.25074545, 0. ],
+            # [ 0. , 0. , 0.5929117 ]],
+
+            # [[-0.6097662 , 0. , 0. ],
+            # [ 0. , -0.01753256, 0. ],
+            # [ 0. , 0. , 0.619769 ]]]
+
+            data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
+            data2.numpy()
+            # [[[ 0. , 0. , 0. , 0. ],
+            # [ 0.7545889 , 0. , 0. , 0. ],
+            # [ 0. , -0.25074545, 0. , 0. ],
+            # [ 0. , 0. , 0.5929117 , 0. ]],
+            #
+            # [[ 0. , 0. , 0. , 0. ],
+            # [-0.6097662 , 0. , 0. , 0. ],
+            # [ 0. , -0.01753256, 0. , 0. ],
+            # [ 0. , 0. , 0.619769 , 0. ]]]
+
+            data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
+            data3.numpy()
+            # [[[ 0. , 0.7545889 , 0. , 0. ],
+            # [ 0. , -0.6097662 , 0. , 0. ]],
+            #
+            # [[ 0. , 0. , -0.25074545, 0. ],
+            # [ 0. , 0. , -0.01753256, 0. ]],
+            #
+            # [[ 0. , 0. , 0. , 0.5929117 ],
+            # [ 0. , 0. , 0. , 0.619769 ]],
+            #
+            # [[ 0. , 0. , 0. , 0. ],
+            # [ 0. , 0. , 0. , 0. ]]]
     """
     inputs = {'Input': [input]}
     attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
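The updated example above leans on paddle 2.x running in dygraph mode by default, which is why the `dg.guard()` context could be dropped. As a quick cross-check, here is a minimal self-contained sketch of the same API; the fixed seed and the `offset=-1` shape check are additions for illustration, not part of the commit:

    import numpy as np
    import paddle.nn.functional as F

    np.random.seed(0)  # assumed seed, only to make the run reproducible
    data = np.random.randn(2, 3).astype('float32')

    # Each length-3 row of `data` is placed on the main diagonal of a
    # 3x3 plane formed (by default) by the last two output dimensions,
    # so a (2, 3) input yields a [2, 3, 3] output.
    out = F.diag_embed(data)
    print(out.shape)  # [2, 3, 3]

    # A negative offset moves the values below the main diagonal and
    # widens each plane by one, giving 4x4 planes.
    below = F.diag_embed(data, offset=-1)
    print(below.shape)  # [2, 4, 4]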
@@ -151,15 +147,15 @@ def row_conv(input, weight, act=None):
     ${comment}

     Args:
-        input (Variable): the input(X) is a LodTensor or tensor, LodTensor(X)
-            supports variable time-length input sequences. The underlying
+        input (Tensor): the input(X) is a LodTensor or tensor, LodTensor(X)
+            supports variable time-length input sequences. The underlying
             tensor in this LoDTensor is a matrix with shape (T, D), where
             T is the total time steps in this mini-batch and D is the input
             data dimension.
             If the input is a padded minibatch, the shape of the input is
             (N, T, D), N is batch size, T is the max time steps in the batch,
             D is the input data dimension.
-        weight (Variable): The weight. A Tensor with shape
+        weight (Tensor): The weight. A Tensor with shape
             (future_context_size + 1, D), where future_context_size is the
             context size of the RowConv operator.
         act (str): Non-linear activation to be applied to output variable.
@@ -171,7 +167,6 @@ def row_conv(input, weight, act=None):
         .. code-block:: python

             from paddle import fluid, nn
-            import paddle.fluid.dygraph as dg
             import paddle.nn.functional as F
             import numpy as np

@@ -182,16 +177,12 @@ def row_conv(input, weight, act=None):
             x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
             weight = np.random.randn(context_size + 1, feature_size).astype(np.float32)

-            place = fluid.CPUPlace()
-            with dg.guard(place):
-                x_var = dg.to_variable(x)
-                w_var = dg.to_variable(weight)
-                y_var = F.extension.row_conv(x_var, w_var)
-                y_np = y_var.numpy()
-
-                print(y_np.shape)
+            x_var = paddle.to_tensor(x)
+            w_var = paddle.to_tensor(weight)
+            y_var = F.extension.row_conv(x_var, w_var)
+            print(y_var.shape)

-            # (4, 8, 6)
+            # [4, 8, 6]
     """

     if in_dygraph_mode():
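Note that the rewritten example calls `paddle.to_tensor` while its import line is still `from paddle import fluid, nn`, so a top-level `import paddle` is implicitly assumed. Here is a self-contained sketch under that assumption; `batch_size`, `time_steps`, `feature_size`, and `context_size` are defined in docstring lines outside the hunks shown, so the values below are illustrative ones that reproduce the commented output shape:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    batch_size, time_steps, feature_size = 4, 8, 6
    context_size = 3  # illustrative; the real value sits outside the hunk

    x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
    # The kernel covers the current step plus `context_size` future steps.
    weight = np.random.randn(context_size + 1, feature_size).astype(np.float32)

    x_var = paddle.to_tensor(x)
    w_var = paddle.to_tensor(weight)
    # row_conv mixes each time step with its future context, so the
    # output keeps the padded input shape (N, T, D).
    y_var = F.extension.row_conv(x_var, w_var)
    print(y_var.shape)  # [4, 8, 6]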

python/paddle/nn/layer/extension.py

Lines changed: 7 additions & 14 deletions
@@ -20,9 +20,6 @@

 class RowConv(layers.Layer):
     """
-    :alias_main: paddle.nn.RowConv
-    :alias: paddle.nn.RowConv,paddle.nn.layer.RowConv,paddle.nn.layer.extension.RowConv
-
     **Row-convolution operator**

     The row convolution is called lookahead convolution. This operator was
@@ -50,7 +47,7 @@ class RowConv(layers.Layer):
             of convolution kernel is [future_context_size + 1, D].
         param_attr (ParamAttr): Attributes of parameters, including
             name, initializer etc. Default: None.
-        act (str): Non-linear activation to be applied to output variable. Default: None.
+        act (str): Non-linear activation to be applied to output tensor. Default: None.
         dtype (str, optional): Data type, it can be "float32". Default: "float32".

     Attributes:
@@ -63,8 +60,7 @@ class RowConv(layers.Layer):
     Examples:
         .. code-block:: python

-            from paddle import fluid, nn
-            import paddle.fluid.dygraph as dg
+            from paddle import nn
             import paddle.nn.functional as F
             import numpy as np

@@ -75,15 +71,12 @@ class RowConv(layers.Layer):

             x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)

-            place = fluid.CPUPlace()
-            with dg.guard(place):
-                x_var = dg.to_variable(x)
-                conv = nn.RowConv(feature_size, context_size)
-                y_var = conv(x_var)
-                y_np = y_var.numpy()
-                print(y_np.shape)
+            x = paddle.to_tensor(x)
+            conv = nn.RowConv(feature_size, context_size)
+            y = conv(x)
+            print(y.shape)

-            # (4, 8, 6)
+            # [4, 8, 6]

     """

     def __init__(self,
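As with row_conv above, the new snippet assumes a top-level `import paddle` for `paddle.to_tensor`. A runnable sketch of the layer form under the same assumptions (illustrative sizes again, since the definitions fall outside the visible hunks):

    import numpy as np
    import paddle
    from paddle import nn

    batch_size, time_steps, feature_size = 4, 8, 6
    context_size = 3  # illustrative; the real value sits outside the hunk

    x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
    x = paddle.to_tensor(x)

    # RowConv owns its learnable (context_size + 1, feature_size)
    # lookahead kernel, so only the layer sizes are passed in.
    conv = nn.RowConv(feature_size, context_size)
    y = conv(x)
    print(y.shape)  # [4, 8, 6]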
