
Commit d580983

Ligoml and kevinng77 authored
[cherry-pick2.4]fix numpy issue in codeblock examples (#47664)
* #46765
* #47042
* Remove redundant numpy import (#47483)
* #47555
* resolve conflict
* resolve conflict
* resolve conflict
* resolve conflict
* resolve conflict
* for_codestyle
* fix sample code paddle.linalg.multi_dot

Co-authored-by: Kevin吴嘉文 <[email protected]>
1 parent 764cea0 · commit d580983

32 files changed: +888 -800 lines changed
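The change is mechanical across the 32 files: docstring examples that round-tripped data through NumPy now build tensors with Paddle's own factory functions, and bare `# [...]` result comments become full printed Tensor output. A minimal sketch of the before/after pattern (illustrative, not copied from any single file):

    import paddle

    # Before: examples detoured through NumPy to build an input tensor.
    #   import numpy as np
    #   inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))

    # After: the tensor is created natively, with no NumPy dependency.
    inp = paddle.rand([1, 10], dtype="float32")     # uniform values in [0, 1)
    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])    # literal lists go straight to to_tensor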

python/paddle/distributed/fleet/fleet.py

Lines changed: 0 additions & 1 deletion
@@ -1120,7 +1120,6 @@ def amp_init(
     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle
             import paddle.nn.functional as F
             paddle.enable_static()

python/paddle/incubate/nn/functional/fused_transformer.py

Lines changed: 0 additions & 1 deletion
@@ -945,7 +945,6 @@ def fused_multi_transformer(
         # required: gpu
         import paddle
         import paddle.incubate.nn.functional as F
-        import numpy as np

         # input: [batch_size, seq_len, embed_dim]
         x = paddle.rand(shape=(2, 4, 128), dtype="float32")

python/paddle/incubate/optimizer/lookahead.py

Lines changed: 3 additions & 4 deletions
@@ -166,8 +166,7 @@ def step(self):
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+        inp = paddle.rand([1,10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
@@ -280,8 +279,8 @@ def minimize(
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+
+        inp = paddle.rand([1, 10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
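The two hunks above only touch the input construction. For context, a self-contained dygraph version of the updated step() example might look like the sketch below; the LookAhead(sgd, alpha=0.2, k=5) construction and the step/clear_grad calls follow the surrounding docstring and are not part of this diff:

    import paddle

    inp = paddle.rand([1, 10], dtype="float32")
    linear = paddle.nn.Linear(10, 1)
    out = linear(inp)
    loss = paddle.mean(out)

    # Assumed API: LookAhead wraps an inner optimizer; slow weights are
    # interpolated with factor alpha once every k fast steps.
    sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
    lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)

    loss.backward()
    lookahead.step()
    lookahead.clear_grad()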

python/paddle/incubate/optimizer/modelaverage.py

Lines changed: 4 additions & 8 deletions
@@ -342,8 +342,7 @@ def minimize(
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+        inp = paddle.rand([1, 10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
@@ -378,8 +377,7 @@ def step(self):
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+        inp = paddle.rand([1, 10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
@@ -425,8 +423,7 @@ def apply(self, executor=None, need_restore=True):
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+        inp = paddle.rand([1, 10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
@@ -500,8 +497,7 @@ def restore(self, executor=None):
     .. code-block:: python

         import paddle
-        import numpy as np
-        inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
+        inp = paddle.rand([1, 10], dtype="float32")
         linear = paddle.nn.Linear(10, 1)
         out = linear(inp)
         loss = paddle.mean(out)
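All four ModelAverage examples (minimize, step, apply, restore) get the same one-line replacement. As a sketch of how those pieces fit together in dygraph mode; the ModelAverage arguments and the apply/restore flow are taken from the surrounding docstrings, not from this diff, so treat them as assumptions:

    import paddle

    inp = paddle.rand([1, 10], dtype="float32")
    linear = paddle.nn.Linear(10, 1)
    loss = paddle.mean(linear(inp))

    sgd = paddle.optimizer.Momentum(learning_rate=0.1, parameters=linear.parameters())
    # Assumed signature: ModelAverage(average_window_rate, parameters,
    #                                 min_average_window, max_average_window)
    model_average = paddle.incubate.ModelAverage(0.15,
                                                 parameters=linear.parameters(),
                                                 min_average_window=2,
                                                 max_average_window=10)
    loss.backward()
    sgd.step()
    model_average.step()
    sgd.clear_grad()
    model_average.clear_grad()

    # Evaluate with the averaged parameters, then put the raw ones back.
    with model_average.apply(need_restore=False):
        print(paddle.mean(linear(inp)))
    model_average.restore()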

python/paddle/nn/functional/activation.py

Lines changed: 25 additions & 15 deletions
@@ -1313,10 +1313,12 @@ def softshrink(x, threshold=0.5, name=None):

         import paddle
         import paddle.nn.functional as F
-        import numpy as np

-        x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
-        out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
+        x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
+        out = F.softshrink(x)
+        print(out)
+        # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [-0.39999998,  0.        ,  0.        ,  0.30000001])
     """
     if threshold < 0:
         raise ValueError(
@@ -1365,10 +1367,12 @@ def softsign(x, name=None):

         import paddle
         import paddle.nn.functional as F
-        import numpy as np

-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
+        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+        out = F.softsign(x)
+        print(out)
+        # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [-0.28571430, -0.16666666,  0.09090909,  0.23076925])
     """
     if in_dygraph_mode():
         return _C_ops.softsign(x)
@@ -1405,10 +1409,12 @@ def swish(x, name=None):

         import paddle
         import paddle.nn.functional as F
-        import numpy as np

-        x = paddle.to_tensor(np.array([-2., 0., 1.]))
-        out = F.swish(x) # [-0.238406, 0., 0.731059]
+        x = paddle.to_tensor([-2., 0., 1.])
+        out = F.swish(x)
+        print(out)
+        # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [-0.23840584,  0.        ,  0.73105854])
     """
     if in_dygraph_mode():
         return _C_ops.swish(x, 1.0)
@@ -1487,10 +1493,12 @@ def tanhshrink(x, name=None):

         import paddle
         import paddle.nn.functional as F
-        import numpy as np

-        x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
-        out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
+        x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
+        out = F.tanhshrink(x)
+        print(out)
+        # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [-0.02005106, -0.00262468,  0.00033200,  0.00868741])
     """
     if in_dygraph_mode():
         return _C_ops.tanh_shrink(x)
@@ -1536,10 +1544,12 @@ def thresholded_relu(x, threshold=1.0, name=None):

         import paddle
         import paddle.nn.functional as F
-        import numpy as np

-        x = paddle.to_tensor(np.array([2., 0., 1.]))
-        out = F.thresholded_relu(x) # [2., 0., 0.]
+        x = paddle.to_tensor([2., 0., 1.])
+        out = F.thresholded_relu(x)
+        print(out)
+        # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [2., 0., 0.])
     """

     if in_dygraph_mode():
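The printed tensors above can be sanity-checked against the elementwise definitions. For example, softshrink with the default threshold t = 0.5 is x - t for x > t, x + t for x < -t, and 0 otherwise; a small check assuming only that standard definition:

    import paddle

    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
    t = 0.5
    # Manual softshrink: zero the band [-t, t], shrink everything else toward 0.
    manual = paddle.where(x > t, x - t,
                          paddle.where(x < -t, x + t, paddle.zeros_like(x)))
    print(manual)  # [-0.4, 0., 0., 0.3], matching F.softshrink(x) above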

python/paddle/nn/functional/common.py

Lines changed: 7 additions & 9 deletions
@@ -1965,18 +1965,16 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
     .. code-block:: python

         import paddle
-        import numpy as np
-
-        x_data = np.array([[[0, 1, 0],
-                            [ 1, 0, 1]]]).astype("float32")
-        print(x_data.shape)
         paddle.disable_static()
-        x = paddle.to_tensor(x_data, stop_gradient=False)
+
+        x = paddle.to_tensor([[[0, 1, 0],
+                               [ 1, 0, 1]]], dtype="float32", stop_gradient=False)
+
         output = paddle.nn.functional.label_smooth(x)
         print(output)
-
-        #[[[0.03333334 0.93333334 0.03333334]
-        # [0.93333334 0.03333334 0.93333334]]]
+        # Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+        #        [[[0.03333334, 0.93333334, 0.03333334],
+        #          [0.93333334, 0.03333334, 0.93333334]]])
     """
     if epsilon > 1.0 or epsilon < 0.0:
         raise ValueError("The value of epsilon must be between 0 and 1.")
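The smoothed values in the new expected output follow the usual uniform-prior formula y = (1 - ε) · label + ε / C, here with ε = 0.1 and C = 3 classes, so 1 → 0.93333334 and 0 → 0.03333334. A quick hand check:

    import paddle

    label = paddle.to_tensor([[[0, 1, 0],
                               [1, 0, 1]]], dtype="float32")
    epsilon, num_classes = 0.1, 3
    # Uniform-prior label smoothing: (1 - eps) * label + eps / C
    manual = (1 - epsilon) * label + epsilon / num_classes
    print(manual)  # 0.03333334 and 0.93333334, matching the output above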

python/paddle/nn/functional/conv.py

Lines changed: 25 additions & 33 deletions
@@ -396,26 +396,22 @@ def conv1d(

         import paddle
         import paddle.nn.functional as F
-        import numpy as np
-        x = np.array([[[4, 8, 1, 9],
-                       [7, 2, 0, 9],
-                       [6, 9, 2, 6]]]).astype(np.float32)
-        w=np.array(
-        [[[9, 3, 4],
-          [0, 0, 7],
-          [2, 5, 6]],
-         [[0, 3, 4],
-          [2, 9, 7],
-          [5, 6, 8]]]).astype(np.float32)
-
-        x_var = paddle.to_tensor(x)
-        w_var = paddle.to_tensor(w)
-        y_var = F.conv1d(x_var, w_var)
-        y_np = y_var.numpy()
-        print(y_np)

-        # [[[133. 238.]
-        #   [160. 211.]]]
+        x = paddle.to_tensor([[[4, 8, 1, 9],
+                               [7, 2, 0, 9],
+                               [6, 9, 2, 6]]], dtype="float32")
+        w = paddle.to_tensor([[[9, 3, 4],
+                               [0, 0, 7],
+                               [2, 5, 6]],
+                              [[0, 3, 4],
+                               [2, 9, 7],
+                               [5, 6, 8]]], dtype="float32")
+
+        y = F.conv1d(x, w)
+        print(y)
+        # Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [[[133., 238.],
+        #          [160., 211.]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:
@@ -949,24 +945,20 @@ def conv1d_transpose(
     Examples:
         .. code-block:: python

-
-
             import paddle
             import paddle.nn.functional as F
-            import numpy as np
-
+
             # shape: (1, 2, 4)
-            x=np.array([[[4, 0, 9, 7],
-                         [8, 0, 9, 2,]]]).astype(np.float32)
+            x = paddle.to_tensor([[[4, 0, 9, 7],
+                                   [8, 0, 9, 2,]]], dtype="float32")
             # shape: (2, 1, 2)
-            w=np.array([[[7, 0]],
-                        [[4, 2]]]).astype(np.float32)
-            x_var = paddle.to_tensor(x)
-            w_var = paddle.to_tensor(w)
-            y_var = F.conv1d_transpose(x_var, w_var)
-            print(y_var)
-
-            # [[[60. 16. 99. 75. 4.]]]
+            w = paddle.to_tensor([[[7, 0]],
+                                  [[4, 2]]], dtype="float32")
+
+            y = F.conv1d_transpose(x, w)
+            print(y)
+            # Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[[60., 16., 99., 75., 4. ]]])
     """
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None:
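The hard-coded outputs can be reproduced by hand. The first conv1d value above is the dot product of filter 0 with the first length-3 window of each input channel: 9·4 + 3·8 + 4·1 + 0·7 + 0·2 + 7·0 + 2·6 + 5·9 + 6·2 = 133. The same check in code:

    import paddle

    x = paddle.to_tensor([[[4, 8, 1, 9],
                           [7, 2, 0, 9],
                           [6, 9, 2, 6]]], dtype="float32")
    w = paddle.to_tensor([[[9, 3, 4],
                           [0, 0, 7],
                           [2, 5, 6]],
                          [[0, 3, 4],
                           [2, 9, 7],
                           [5, 6, 8]]], dtype="float32")
    # y[0, 0, 0]: elementwise product of filter 0 with the first window, summed.
    print((w[0] * x[0, :, 0:3]).sum())  # 133., matching y[0, 0, 0] above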

python/paddle/nn/functional/loss.py

Lines changed: 39 additions & 37 deletions
@@ -252,16 +252,14 @@ def fluid_softmax_with_cross_entropy(
     .. code-block:: python

         import paddle
-        import numpy as np

-        data = np.random.rand(128).astype("float32")
-        label = np.random.rand(1).astype("int64")
-        data = paddle.to_tensor(data)
-        label = paddle.to_tensor(label)
-        linear = paddle.nn.Linear(128, 100)
-        x = linear(data)
-        out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+        logits = paddle.to_tensor([0.4, 0.6, 0.9])
+        label = paddle.randint(high=2, shape=[1], dtype="int64")
+
+        out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
         print(out)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [1.15328646])
     """
     if _non_static_mode():
         if core.is_compiled_with_npu():
@@ -1778,7 +1776,6 @@ def ctc_loss(

         # declarative mode
         import paddle.nn.functional as F
-        import numpy as np
         import paddle

         # length of the longest logit sequence
@@ -1790,8 +1787,7 @@
         # class num
         class_num = 3

-        np.random.seed(1)
-        log_probs = np.array([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
+        log_probs = paddle.to_tensor([[[4.17021990e-01, 7.20324516e-01, 1.14374816e-04],
                                 [3.02332580e-01, 1.46755889e-01, 9.23385918e-02]],

                                [[1.86260208e-01, 3.45560730e-01, 3.96767467e-01],
@@ -1804,30 +1800,30 @@
                                [9.68261600e-01, 3.13424170e-01, 6.92322612e-01]],

                               [[8.76389146e-01, 8.94606650e-01, 8.50442126e-02],
-                               [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]]).astype("float32")
-        labels = np.array([[1, 2, 2],
-                           [1, 2, 2]]).astype("int32")
-        input_lengths = np.array([5, 5]).astype("int64")
-        label_lengths = np.array([3, 3]).astype("int64")
-
-        log_probs = paddle.to_tensor(log_probs)
-        labels = paddle.to_tensor(labels)
-        input_lengths = paddle.to_tensor(input_lengths)
-        label_lengths = paddle.to_tensor(label_lengths)
+                               [3.90547849e-02, 1.69830427e-01, 8.78142476e-01]]],
+                              dtype="float32")
+        labels = paddle.to_tensor([[1, 2, 2],
+                                   [1, 2, 2]], dtype="int32")
+        input_lengths = paddle.to_tensor([5, 5], dtype="int64")
+        label_lengths = paddle.to_tensor([3, 3], dtype="int64")

         loss = F.ctc_loss(log_probs, labels,
             input_lengths,
             label_lengths,
             blank=0,
             reduction='none')
-        print(loss) #[3.9179852 2.9076521]
+        print(loss)
+        # Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [3.91798496, 2.90765190])

         loss = F.ctc_loss(log_probs, labels,
             input_lengths,
             label_lengths,
             blank=0,
             reduction='mean')
-        print(loss) #[1.1376063]
+        print(loss)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [1.13760614])

     """
@@ -2265,16 +2261,14 @@ def softmax_with_cross_entropy(
     .. code-block:: python

         import paddle
-        import numpy as np

-        data = np.random.rand(128).astype("float32")
-        label = np.random.rand(1).astype("int64")
-        data = paddle.to_tensor(data)
-        label = paddle.to_tensor(label)
-        linear = paddle.nn.Linear(128, 100)
-        x = linear(data)
-        out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label)
+        logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
+        label = paddle.to_tensor([1], dtype="int64")
+
+        out = paddle.nn.functional.softmax_with_cross_entropy(logits=logits, label=label)
         print(out)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [1.15328646])
     """
     return fluid_softmax_with_cross_entropy(
         logits,
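The fixed expected value also checks out against the definition -log(softmax(logits)[label]): softmax([0.4, 0.6, 0.9]) ≈ [0.2584, 0.3156, 0.4260], and -ln(0.3156) ≈ 1.1533. In code, using only the standard formula:

    import paddle

    logits = paddle.to_tensor([0.4, 0.6, 0.9], dtype="float32")
    # Hard-label cross entropy picks out -log(p[label]) with label = 1.
    p = paddle.nn.functional.softmax(logits)
    print(-paddle.log(p[1]))  # 1.15328646, matching the expected output above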
@@ -3869,18 +3863,26 @@ def soft_margin_loss(input, label, reduction='mean', name=None):
     .. code-block:: python

         import paddle
-        import numpy as np

         input = paddle.to_tensor([[0.5, 0.6, 0.7],[0.3, 0.5, 0.2]], 'float32')
         label = paddle.to_tensor([[1.0, -1.0, 1.0],[-1.0, 1.0, 1.0]], 'float32')
         output = paddle.nn.functional.soft_margin_loss(input, label)
+        print(output)
+        # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [0.64022040])
+
+        input = paddle.uniform(shape=(5, 5), dtype="float32", min=0.1, max=0.8)
+        label = paddle.randint(0, 2, shape=(5, 5), dtype="int64")
+        label[label==0]=-1

-        input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
-        label_np = np.random.randint(0, 2, size=(5, 5)).astype(np.int64)
-        label_np[label_np==0]=-1
-        input = paddle.to_tensor(input_np)
-        label = paddle.to_tensor(label_np)
         output = paddle.nn.functional.soft_margin_loss(input, label, reduction='none')
+        print(output)
+        # Tensor(shape=[5, 5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+        #        [[1.09917796, 0.52613139, 0.56263304, 0.82736146, 0.38776723],
+        #         [1.07179427, 1.11924267, 0.49877715, 1.10026348, 0.46184641],
+        #         [0.84367639, 0.74795729, 0.44629076, 0.55123353, 0.77659678],
+        #         [0.39465919, 0.76651484, 0.54485321, 0.76609844, 0.77166790],
+        #         [0.51283568, 0.84757161, 0.78913331, 1.05268764, 0.45318675]])
     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
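Here too the first expected output follows from the definition loss = log(1 + exp(-label · input)) with a mean reduction:

    import paddle

    input = paddle.to_tensor([[0.5, 0.6, 0.7], [0.3, 0.5, 0.2]], 'float32')
    label = paddle.to_tensor([[1.0, -1.0, 1.0], [-1.0, 1.0, 1.0]], 'float32')
    # Elementwise soft margin loss, then the 'mean' reduction.
    manual = paddle.log(1 + paddle.exp(-label * input))
    print(manual.mean())  # 0.64022040, matching the first expected output above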
