
Commit 4df7c64

[API Compatibility No.67] Add torch-style arg and alias for frac -part (PaddlePaddle#76671)
* use decorator to introduce arg alias for paddle.frac
* add compatibility tests for paddle.frac
1 parent 0a80d1c commit 4df7c64
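
In short, after this change paddle.frac accepts the torch-style keyword alias input (for x) and a keyword-only out tensor. A minimal usage sketch, based only on the calls exercised by the new tests below (values and shapes are arbitrary):

    import paddle

    x = paddle.to_tensor([1.25, -2.5, 3.0])

    # torch-style keyword alias: `input` is forwarded to `x`
    y1 = paddle.frac(input=x)

    # torch-style `out` argument: the result is also written into a
    # preallocated tensor, mirroring the new compatibility tests
    out = paddle.empty([])
    y2 = paddle.frac(x, out=out)  # tests assert `out` and `y2` both match the reference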

File tree: 2 files changed (+42, -2 lines)


python/paddle/tensor/math.py

Lines changed: 6 additions & 2 deletions
@@ -5984,13 +5984,17 @@ def heaviside(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
     return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
-def frac(x: Tensor, name: str | None = None) -> Tensor:
+@param_one_alias(["x", "input"])
+def frac(
+    x: Tensor, name: str | None = None, *, out: Tensor | None = None
+) -> Tensor:
     """
     This API is used to return the fractional portion of each element in input.
 
     Args:
         x (Tensor): The input tensor, which data type should be int32, int64, float32, float64.
         name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+        out (Tensor, optional): The output tensor. Default: None.
 
     Returns:
         Tensor: The output Tensor of frac.
@@ -6025,7 +6029,7 @@ def frac(x: Tensor, name: str | None = None) -> Tensor:
     )
     if in_dynamic_or_pir_mode():
         y = _C_ops.trunc(x)
-        return _C_ops.subtract(x, y)
+        return _C_ops.subtract(x, y, out=out)
     else:
         inputs = {"X": x}
         attrs = {}
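
The param_one_alias decorator used above is defined elsewhere in Paddle and is not part of this diff. As a rough illustration of the general pattern such a decorator follows (not Paddle's actual implementation), a keyword-alias wrapper can remap the alias onto the real parameter name before delegating:

    import functools


    def keyword_alias(name, alias):
        # Illustrative only: forward `alias=` as `name=` before calling the function.
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                if alias in kwargs and name not in kwargs:
                    kwargs[name] = kwargs.pop(alias)
                return fn(*args, **kwargs)

            return wrapper

        return decorator


    @keyword_alias("x", "input")
    def demo(x):
        return x * 2


    print(demo(input=3))  # prints 6: `input` is forwarded as `x`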

test/legacy_test/test_frac_api.py

Lines changed: 36 additions & 0 deletions
@@ -121,5 +121,41 @@ def test_api_dygraph(self):
         np.testing.assert_allclose(x.grad.shape, x.shape)
 
 
+class TestFracAPI_Compatibility(unittest.TestCase):
+    def setUp(self):
+        self.shape = [5, 6]
+        self.dtype = "float32"
+        np.random.seed(2025)
+        self.x_np = np.random.rand(*self.shape).astype(self.dtype)
+        self.place = get_device_place()
+
+    def test_frac_input_arg(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out_ref = ref_frac(self.x_np)
+        out = paddle.frac(input=x)
+        np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05)
+        paddle.enable_static()
+
+    def test_frac_output_arg(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out_ref = ref_frac(self.x_np)
+        out = paddle.empty([])
+        paddle.frac(x, out=out)
+        np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05)
+        paddle.enable_static()
+
+    def test_frac_tensor_output_arg(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_tensor(self.x_np)
+        out_ref = ref_frac(self.x_np)
+        out1 = paddle.empty([])
+        out2 = paddle.frac(x, out=out1)
+        np.testing.assert_allclose(out1.numpy(), out_ref, rtol=1e-05)
+        np.testing.assert_allclose(out2.numpy(), out_ref, rtol=1e-05)
+        paddle.enable_static()
+
+
 if __name__ == '__main__':
     unittest.main()
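
The helpers ref_frac and get_device_place are defined earlier in test_frac_api.py and are not shown in this hunk. Presumably ref_frac is a NumPy reference for the fractional part, along the lines of the sketch below (an assumption, included only to make the assertions readable; it matches the kernel's x - trunc(x) computation shown in the math.py hunk):

    import numpy as np


    def ref_frac(x):
        # Hypothetical NumPy reference: fractional part = x - trunc(x).
        return x - np.trunc(x)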
