
Commit 7a6ddc9

[API Compatibilities] Add rand_like, multinomial and var (#74920)
* add rand_like, multinomial and var
* fix rand_like impl
* update rand_like, rand, uniform implementation
* aligning logic with rand_like
* remove redundant operations
1 parent 42153e0 commit 7a6ddc9
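
Taken together, the commit extends three public APIs: a new `paddle.rand_like`, a keyword-only `out=` on `paddle.multinomial`, and keyword-only `correction=`/`out=` on `paddle.var`. A quick smoke test of that surface (a sketch assuming dynamic mode; shapes and values are illustrative):

```python
import paddle

# rand_like: new API, uniform samples on [0, 1) with the input's shape/dtype
x = paddle.rand_like(paddle.zeros([2, 3]))

# multinomial: now also accepts a keyword-only out= parameter
s = paddle.multinomial(paddle.to_tensor([0.3, 0.7]), num_samples=2, replacement=True)

# var: now accepts keyword-only correction= (and out=)
v = paddle.var(paddle.to_tensor([1.0, 2.0, 3.0]), correction=0)
```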

File tree

7 files changed: +943 −24 lines

python/paddle/__init__.py (2 additions, 0 deletions)

```diff
@@ -661,6 +661,7 @@ def new_init(self, *args, **kwargs):
     normal_,
     poisson,
     rand,
+    rand_like,
     randint,
     randint_like,
     randn,
@@ -1252,6 +1253,7 @@ def __dir__(self):
     'geometric_',
     'randn',
     'randn_like',
+    'rand_like',
     'strided_slice',
     'unique',
     'unique_consecutive',
```

python/paddle/tensor/__init__.py (1 addition, 0 deletions)

```diff
@@ -454,6 +454,7 @@
     normal_,
     poisson,
     rand,
+    rand_like,
     randint,
     randint_like,
     randn,
```

python/paddle/tensor/random.py (123 additions, 8 deletions)

```diff
@@ -453,6 +453,8 @@ def multinomial(
     num_samples: int = 1,
     replacement: bool = False,
     name: str | None = None,
+    *,
+    out: Tensor | None = None,
 ) -> Tensor:
     """
     Returns a Tensor filled with random values sampled from a Multinomial
@@ -474,6 +476,7 @@ def multinomial(
         name(str|None, optional): The default value is None. Normally there is no
             need for user to set this property. For more information, please
             refer to :ref:`api_guide_Name`.
+        out (Tensor|None, optional): The output Tensor. If set, the result will be stored in this Tensor. Default is None.
     Returns:
         Tensor, A Tensor filled with sampled category index after ``num_samples`` times samples.
 
@@ -516,7 +519,7 @@ def multinomial(
     """
 
     if in_dynamic_or_pir_mode():
-        return _C_ops.multinomial(x, num_samples, replacement)
+        return _C_ops.multinomial(x, num_samples, replacement, out=out)
     else:
         check_variable_and_dtype(
             x, "x", ["uint16", "float16", "float32", "float64"], "multinomial"
```
```diff
@@ -1150,14 +1153,104 @@ def randn_like(
     """
     if dtype is None:
         dtype = x.dtype
-    else:
-        if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
-            dtype = convert_np_dtype_to_dtype_(dtype)
     shape = paddle.shape(x)
 
     return standard_normal(shape, dtype, name)
 
 
+def rand_like(
+    input,
+    name: str | None = None,
+    *,
+    dtype: DTypeLike | None = None,
+    device: PlaceLike | None = None,
+    requires_grad: bool = False,
+):
+    """
+    Returns a tensor with the same size as input that is filled with random numbers from a uniform distribution on the interval [0, 1).
+
+    Args:
+        input (Tensor): The input multi-dimensional tensor which specifies shape. The dtype of ``input``
+            can be float16, float64, float8_e4m3fn, float32, bfloat16.
+        name (str|None, optional): The default value is None. Normally there is no
+            need for user to set this property. For more information, please
+            refer to :ref:`api_guide_Name`.
+        dtype (str|np.dtype|paddle.dtype|None, optional): The data type of the
+            output tensor. Supported data types: float16, float64, float8_e4m3fn, float32, bfloat16.
+            If ``dtype`` is None, the data type is the same as input's data type. Default is None.
+        device (str|paddle.Place|None, optional): The device on which to place the created tensor.
+            If None, the device is the same as input's device. Default is None.
+        requires_grad (bool, optional): Whether to compute gradients for the created tensor.
+            Default is False.
+
+    Returns:
+        Tensor: A Tensor with the same size as input that is filled with random numbers from a uniform distribution on the interval [0, 1).
+
+    Examples:
+        .. code-block:: python
+
+            >>> import paddle
+
+            >>> # example 1:
+            >>> # dtype is None and the dtype of input is float32
+            >>> x = paddle.zeros((2, 3)).astype("float32")
+            >>> out1 = paddle.rand_like(x)
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [[0.34962332, 0.82356787, 0.91275704],
+                    [0.12328923, 0.58439839, 0.32735515]])
+            >>> # doctest: -SKIP
+            >>> print(out1.dtype)
+            paddle.float32
+
+            >>> # example 2:
+            >>> # dtype is None and the dtype of input is float64
+            >>> x = paddle.zeros((2, 3)).astype("float64")
+            >>> out2 = paddle.rand_like(x)
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
+                   [[0.73964721, 0.28413662, 0.91918457],
+                    [0.62838351, 0.39185921, 0.51561823]])
+            >>> # doctest: -SKIP
+            >>> print(out2.dtype)
+            paddle.float64
+
+            >>> # example 3:
+            >>> # dtype is float64 and the dtype of input is float32
+            >>> x = paddle.zeros((2, 3)).astype("float32")
+            >>> out3 = paddle.rand_like(x, dtype="float64")
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
+                   [[0.84492219, 0.11572551, 0.73868765],
+                    [0.90269387, 0.45644298, 0.28739912]])
+            >>> # doctest: -SKIP
+            >>> print(out3.dtype)
+            paddle.float64
+
+            >>> # example 4:
+            >>> # with requires_grad=True
+            >>> x = paddle.zeros((2, 2)).astype("float32")
+            >>> out4 = paddle.rand_like(x, requires_grad=True)
+            >>> print(out4.stop_gradient)
+            False
+    """
+    if dtype is None:
+        dtype = input.dtype
+
+    return uniform(
+        shape=input.shape,
+        dtype=dtype,
+        min=0.0,
+        max=1.0,
+        name=name,
+        device=device,
+        requires_grad=requires_grad,
+    )
+
+
 def normal(
     mean: complex | Tensor = 0.0,
     std: float | Tensor = 1.0,
```
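
As the added body shows, `rand_like` is a thin wrapper over `uniform` with `min=0.0`, `max=1.0` and the input's shape and dtype. A sketch of that equivalence (shapes and dtypes only; the two draws are independent):

```python
import paddle

x = paddle.ones((2, 3), dtype="float32")

a = paddle.rand_like(x)
b = paddle.uniform(shape=x.shape, dtype=x.dtype, min=0.0, max=1.0)

# Same metadata, different random values
assert a.shape == b.shape and a.dtype == b.dtype
```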
```diff
@@ -1370,6 +1463,10 @@ def uniform(
     max: float = 1.0,
     seed: int = 0,
     name: str | None = None,
+    *,
+    out: Tensor | None = None,
+    device: PlaceLike | None = None,
+    requires_grad: bool = False,
 ) -> Tensor:
     """
     Returns a Tensor filled with random values sampled from a uniform
@@ -1460,14 +1557,23 @@ def uniform(
 
     if in_dynamic_mode():
         shape = paddle.utils.convert_shape_to_list(shape)
-        return _C_ops.uniform(
+        place = (
+            _current_expected_place()
+            if device is None
+            else _get_paddle_place(device)
+        )
+        tensor = _C_ops.uniform(
             shape,
             dtype,
             float(min),
             float(max),
             seed,
-            _current_expected_place(),
+            place,
+            out=out,
         )
+        if requires_grad is True:
+            tensor.stop_gradient = False
+        return tensor
     elif in_pir_mode():
         check_type(
             shape, 'shape', (list, tuple, paddle.pir.Value), 'uniform/rand'
@@ -1482,14 +1588,23 @@ def uniform(
         if isinstance(max, int):
             max = float(max)
 
-        return _C_ops.uniform(
+        place = (
+            _current_expected_place()
+            if device is None
+            else _get_paddle_place(device)
+        )
+        tensor = _C_ops.uniform(
             shape,
             dtype,
             min,
             max,
             seed,
-            _current_expected_place(),
+            place,
+            out=out,
+        )
+        if requires_grad is True:
+            tensor.stop_gradient = False
+        return tensor
     else:
         check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
         check_dtype(dtype, 'dtype', supported_dtypes, 'uniform/rand')
```
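
The same `place`/`requires_grad` plumbing appears in both the dynamic and PIR branches. A sketch of the new keywords in use (device="cpu" is chosen so the snippet runs anywhere):

```python
import paddle

t = paddle.uniform(
    shape=[2, 3],
    dtype="float32",
    min=-1.0,
    max=1.0,
    device="cpu",        # resolved via _get_paddle_place(device)
    requires_grad=True,  # flips tensor.stop_gradient to False
)
print(t.stop_gradient)  # False
```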

python/paddle/tensor/stat.py (46 additions, 16 deletions)

```diff
@@ -32,7 +32,10 @@
 
 from ..base.data_feeder import check_type, check_variable_and_dtype
 from ..common_ops_import import Variable
-from ..framework import LayerHelper, core
+from ..framework import (
+    LayerHelper,
+    core,
+)
 from .math import _get_reduce_axis_with_tensor
 
 if TYPE_CHECKING:
@@ -157,9 +160,12 @@ def mean(
 def var(
     x: Tensor,
     axis: int | Sequence[int] | None = None,
-    unbiased: bool = True,
+    unbiased: bool | None = None,
     keepdim: bool = False,
     name: str | None = None,
+    *,
+    correction: float = 1,
+    out: Tensor | None = None,
 ) -> Tensor:
     """
     Computes the variance of ``x`` along ``axis`` .
@@ -181,6 +187,9 @@ def var(
         unbiased (bool, optional): Whether to use the unbiased estimation. If ``unbiased`` is True, the divisor used in the computation is :math:`N - 1`, where :math:`N` represents the number of elements along ``axis`` , otherwise the divisor is :math:`N`. Default is True.
         keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the input unless keep_dim is true. Default is False.
         name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+        correction (int|float, optional): Difference between the sample size and sample degrees of freedom.
+            Defaults to 1 (Bessel's correction). If unbiased is specified, this parameter is ignored.
+        out (Tensor|None, optional): Output tensor. Default is None.
 
     Returns:
         Tensor, results of variance along ``axis`` of ``x``, with the same data type as ``x``.
@@ -198,28 +207,41 @@ def var(
         >>> print(out2.numpy())
         [1. 4.3333335]
     """
+    if unbiased is not None and correction != 1:
+        raise ValueError("Only one of unbiased and correction may be given")
+
+    if unbiased is not None:
+        actual_correction = 1.0 if unbiased else 0.0
+    else:
+        actual_correction = float(correction)
     if not in_dynamic_mode():
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'var'
         )
 
     u = mean(x, axis, True, name)
     dtype = paddle.float32 if x.dtype == paddle.float16 else x.dtype
-    out = paddle.sum(
+    out_tensor = paddle.sum(
         paddle.pow((x - u), 2), axis, keepdim=keepdim, name=name, dtype=dtype
     )
 
     n = paddle.cast(paddle.numel(x), "int64") / paddle.cast(
-        paddle.numel(out), "int64"
+        paddle.numel(out_tensor), "int64"
     )
     n = n.astype(dtype)
-    if unbiased:
-        one_const = paddle.ones([], x.dtype)
-        if paddle.in_dynamic_mode() and n <= one_const:
+
+    if actual_correction != 0:
+        corrected_n = n - actual_correction
+        corrected_n = paddle.maximum(
+            corrected_n, paddle.zeros_like(corrected_n)
+        )
+        if paddle.in_dynamic_mode() and paddle.any(corrected_n <= 0):
             warnings.warn("Degrees of freedom is <= 0.", stacklevel=2)
-        n = n - 1.0
-    n.stop_gradient = True
-    out /= n
+    else:
+        corrected_n = n
+
+    corrected_n.stop_gradient = True
+    out_tensor /= corrected_n
 
     def _replace_nan(out):
         indices = paddle.arange(out.numel(), dtype='int64')
@@ -229,12 +251,20 @@ def _replace_nan(out):
         return out_nan
 
     if 0 in x.shape:
-        out = _replace_nan(out)
-    if len(x.shape) == 0 and not unbiased:
-        out = paddle.to_tensor(0, stop_gradient=out.stop_gradient)
-    if out.dtype != x.dtype:
-        return out.astype(x.dtype)
-    return out
+        out_tensor = _replace_nan(out_tensor)
+    if len(x.shape) == 0 and actual_correction == 0:
+        out_tensor = paddle.to_tensor(0, stop_gradient=out_tensor.stop_gradient)
+
+    if out_tensor.dtype != x.dtype:
+        result = out_tensor.astype(x.dtype)
+    else:
+        result = out_tensor
+
+    if out is not None:
+        paddle.assign(result, out)
+        return out
+
+    return result
 
 
 def std(
```
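
With this rewrite the divisor is `N - correction` (clamped at zero), so `correction=1` reproduces the old `unbiased=True` behaviour and `correction=0` the old `unbiased=False`. A sketch of the new keywords, reusing the docstring's input (the buffer's shape/dtype are assumed to match the result):

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])

v1 = paddle.var(x, axis=1)                # divisor N - 1 (default correction=1)
v0 = paddle.var(x, axis=1, correction=0)  # divisor N

# Writing into a preallocated tensor via the new out= parameter:
buf = paddle.empty([2], dtype="float32")
paddle.var(x, axis=1, out=buf)

# unbiased and a non-default correction are mutually exclusive:
# paddle.var(x, axis=1, unbiased=True, correction=0)  # raises ValueError
```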
