Commit 409515d

[API compatibility] add paddle.Tensor.ravel (#74454)
* add ravel api
* use paddle.Tensor.ravel for testcase
* replace ravel param x with input
* change copyright time
* add only Tensor.ravel
1 parent a3e6c07 commit 409515d
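
For context, `ravel` mirrors `numpy.ndarray.ravel`: it flattens a tensor across every axis. A minimal dygraph sketch (the zero-argument method call is inferred from the tests in this commit, so treat the exact signature as an assumption):

```python
import paddle

# hypothetical usage of the method this commit adds
x = paddle.to_tensor([[1, 2], [3, 4]])
y = x.ravel()  # flattens all axes, like numpy.ndarray.ravel
print(y.shape)  # [4]
```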

File tree

2 files changed: +238 -0 lines changed

python/paddle/tensor/__init__.py (+1)
test/legacy_test/test_ravel_op.py (+237)

python/paddle/tensor/__init__.py

1 addition, 0 deletions

```diff
@@ -680,6 +680,7 @@
     'expand',
     'broadcast_to',
     'expand_as',
+    'ravel',
     'flatten',
     'flatten_',
     'gather',
```
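
This list enumerates the functions that Paddle binds onto `paddle.Tensor` as methods at import time, so the one-line addition is what makes the method spelling resolve. A hedged sanity check of the two equivalent spellings, assuming a build that includes this commit:

```python
import paddle

x = paddle.rand([2, 3])
# once 'ravel' is registered, the bound-method and unbound spellings agree
a = x.ravel()
b = paddle.Tensor.ravel(x)
assert a.shape == b.shape == [6]
```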

test/legacy_test/test_ravel_op.py

237 additions, 0 deletions

```python
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16

import paddle
from paddle.base import core


class TestRavelOp(OpTest):
    def setUp(self):
        self.python_api = paddle.Tensor.ravel
        self.public_python_api = paddle.Tensor.ravel
        self.python_out_sig = ["Out"]
        # ravel lowers to a flatten over the full axis range
        self.op_type = "flatten_contiguous_range"
        self.prim_op_type = "comp"
        self.start_axis = 0
        self.stop_axis = -1
        self.if_enable_cinn()
        self.init_test_case()
        self.init_test_dtype()
        self.init_input_data()
        self.init_attrs()
        self.outputs = {
            "Out": self.inputs["X"].reshape(self.new_shape),
            "XShape": np.random.random(self.in_shape).astype("float32"),
        }

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_output_with_place(
                core.CUDAPlace(0),
                no_check_set=["XShape"],
                check_prim=True,
                check_pir=True,
                check_prim_pir=True,
            )
        else:
            self.check_output(
                no_check_set=["XShape"],
                check_prim=True,
                check_pir=True,
                check_prim_pir=True,
            )

    def test_check_grad(self):
        if str(self.dtype) in {"float16", "uint16"}:
            self.check_grad_with_place(
                core.CUDAPlace(0),
                ["X"],
                "Out",
                check_prim=True,
                check_pir=True,
            )
        else:
            self.check_grad(["X"], "Out", check_prim=True, check_pir=True)

    def init_test_case(self):
        self.in_shape = (3, 2, 5, 4)
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = 120

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }

    def init_test_dtype(self):
        self.dtype = "float64"

    def init_input_data(self):
        if str(self.dtype) != "uint16":
            x = np.random.random(self.in_shape).astype(self.dtype)
        else:
            x = np.random.random(self.in_shape).astype("float32")
            x = convert_float_to_uint16(x)

        self.inputs = {"X": x}


class TestRavelFP32Op(TestRavelOp):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not compiled with CUDA",
)
class TestRavelFP16Op(TestRavelOp):
    def init_test_dtype(self):
        self.dtype = "float16"


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestRavelBF16Op(TestRavelOp):
    def if_enable_cinn(self):
        pass

    def init_test_dtype(self):
        # bfloat16 inputs are stored as uint16 by OpTest
        self.dtype = "uint16"


class TestRavelOp_ZeroDim(TestRavelOp):
    def init_test_case(self):
        self.in_shape = ()
        self.start_axis = 0
        self.stop_axis = -1
        self.new_shape = (1,)

    def if_enable_cinn(self):
        self.enable_cinn = False

    def init_attrs(self):
        self.attrs = {
            "start_axis": self.start_axis,
            "stop_axis": self.stop_axis,
        }


class TestRavelFP32Op_ZeroDim(TestRavelOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float32"


@unittest.skipIf(
    not core.is_compiled_with_cuda(),
    "core is not compiled with CUDA",
)
class TestRavelFP16Op_ZeroDim(TestRavelOp_ZeroDim):
    def init_test_dtype(self):
        self.dtype = "float16"


class TestRavelOpError(unittest.TestCase):
    def test_errors(self):
        image_shape = (2, 3, 4, 4)
        x = (
            np.arange(
                image_shape[0]
                * image_shape[1]
                * image_shape[2]
                * image_shape[3]
            ).reshape(image_shape)
            / 100.0
        )
        x = x.astype('float32')

        def test_InputError():
            # calling the API on a raw numpy array should raise ValueError
            out = paddle.Tensor.ravel(x)

        self.assertRaises(ValueError, test_InputError)


class TestStaticRavelPythonAPI(unittest.TestCase):
    def execute_api(self, x):
        return paddle.Tensor.ravel(x)

    def test_static_api(self):
        paddle.enable_static()
        np_x = np.random.rand(2, 3, 4, 4).astype('float32')

        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[2, 3, 4, 4], dtype='float32'
            )
            out = self.execute_api(x)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_out = exe.run(main_prog, feed={"x": np_x}, fetch_list=[out])
        self.assertTrue((96,) == fetch_out[0].shape)


class TestStaticRavelInferShapePythonAPI(unittest.TestCase):
    def execute_api(self, x):
        return paddle.Tensor.ravel(x)

    def test_static_api(self):
        paddle.enable_static()
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(
                name="x", shape=[-1, 3, -1, -1], dtype='float32'
            )
            out = self.execute_api(x)
        self.assertTrue((-1,) == tuple(out.shape))


class TestRavelZeroSizedTensorAPI(unittest.TestCase):
    def test_dygraph(self):
        paddle.disable_static()
        data = np.random.randn(2, 3, 0)
        x = paddle.to_tensor(data)
        out = paddle.Tensor.ravel(x)
        out_np = data.flatten()
        np.testing.assert_equal(out.numpy(), out_np)

    def test_static(self):
        paddle.enable_static()
        data = np.random.randn(2, 3, 0)
        main_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, paddle.static.Program()):
            x = paddle.static.data(name="x", shape=[2, 3, 0], dtype='float64')
            out = paddle.Tensor.ravel(x)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_out = exe.run(main_prog, feed={"x": data}, fetch_list=[out])[0]
        out_np = data.flatten()
        np.testing.assert_equal(fetch_out, out_np)


if __name__ == "__main__":
    unittest.main()
```
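
Since the tests drive `flatten_contiguous_range` with `start_axis=0` and `stop_axis=-1`, `ravel` should behave exactly like a full `paddle.flatten`. A quick cross-check against `paddle.flatten` and NumPy (a sketch, assuming a build that includes this commit):

```python
import numpy as np
import paddle

paddle.disable_static()
data = np.random.rand(3, 2, 5, 4).astype('float32')
x = paddle.to_tensor(data)

# ravel == flatten over the full axis range == numpy ravel
out_ravel = x.ravel()
out_flatten = paddle.flatten(x, start_axis=0, stop_axis=-1)
np.testing.assert_allclose(out_ravel.numpy(), out_flatten.numpy())
np.testing.assert_allclose(out_ravel.numpy(), data.ravel())
```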
