-import unittest
-
 import dpctl
 import dpctl.tensor._dlpack as dlp
 import numpy
@@ -31,18 +29,54 @@ def _gen_array(dtype, alloc_q=None):
     return array


-class TestDLPackConversion(unittest.TestCase):
+class DLDummy:
+    """Dummy object to wrap a __dlpack__ capsule, so we can use from_dlpack."""
+
+    def __init__(self, capsule, device):
+        self.capsule = capsule
+        self.device = device
+
+    def __dlpack__(self, *args, **kwargs):
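+        # The consumer's stream/max_version arguments are ignored;
+        # the pre-built capsule is handed back as-is.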
+        return self.capsule
+
+    def __dlpack_device__(self):
+        return self.device
+
+
+@pytest.mark.skip("toDlpack() and fromDlpack() are not supported")
+class TestDLPackConversion:
+
+    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
     @testing.for_all_dtypes(no_bool=False)
     def test_conversion(self, dtype):
         orig_array = _gen_array(dtype)
-        tensor = orig_array.__dlpack__()
-        out_array = dlp.from_dlpack_capsule(tensor)
+        tensor = orig_array.toDlpack()
+        out_array = cupy.fromDlpack(tensor)
         testing.assert_array_equal(orig_array, out_array)
-        assert orig_array.get_array()._pointer == out_array._pointer
+        assert orig_array.get_array()._pointer == out_array.get_array()._pointer
+
+
+class TestNewDLPackConversion:

+    @pytest.fixture(
+        autouse=True, params=["device"]
+    )  # "managed" is not supported
+    def pool(self, request):
+        self.memory = request.param
+        if self.memory == "managed":
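+            # Temporarily route allocations through a managed-memory pool;
+            # the previous allocator is restored after the yield.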
+            old_pool = cupy.get_default_memory_pool()
+            new_pool = cuda.MemoryPool(cuda.malloc_managed)
+            cuda.set_allocator(new_pool.malloc)
+
+            yield
+
+            cuda.set_allocator(old_pool.malloc)
+        else:
+            # Nothing to do, we can use the default pool.
+            yield
+
+        del self.memory

-@testing.parameterize(*testing.product({"memory": ("device", "managed")}))
-class TestNewDLPackConversion(unittest.TestCase):
     def _get_stream(self, stream_name):
         if stream_name == "null":
             return dpctl.SyclQueue()
@@ -55,6 +89,114 @@ def test_conversion(self, dtype):
         testing.assert_array_equal(orig_array, out_array)
         assert orig_array.get_array()._pointer == out_array.get_array()._pointer

+    @pytest.mark.skip("no limitations in from_dlpack()")
+    def test_from_dlpack_and_conv_errors(self):
+        orig_array = _gen_array("int8")
+
+        with pytest.raises(NotImplementedError):
+            cupy.from_dlpack(orig_array, device=orig_array.device)
+
+        with pytest.raises(BufferError):
+            # Currently CuPy's `__dlpack__` only allows `copy=True`
+            # for host copies.
+            cupy.from_dlpack(orig_array, copy=True)
+
+    @pytest.mark.parametrize(
+        "kwargs, versioned",
+        [
+            ({}, False),
+            ({"max_version": None}, False),
+            ({"max_version": (1, 0)}, True),
+            ({"max_version": (10, 10)}, True),
+            ({"max_version": (0, 8)}, False),
+        ],
+    )
+    def test_conversion_max_version(self, kwargs, versioned):
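+        # A max_version of (1, 0) or higher requests a versioned capsule;
+        # None, an omitted argument, or anything below (1, 0) falls back
+        # to the legacy unversioned "dltensor" capsule.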
+        orig_array = _gen_array("int8")
+
+        capsule = orig_array.__dlpack__(**kwargs)
+        # We can identify if the version is correct via the name:
+        if versioned:
+            assert '"dltensor_versioned"' in str(capsule)
+        else:
+            assert '"dltensor"' in str(capsule)
+
+        out_array = cupy.from_dlpack(
+            DLDummy(capsule, orig_array.__dlpack_device__())
+        )
+
+        testing.assert_array_equal(orig_array, out_array)
+        assert orig_array.get_array()._pointer == out_array.get_array()._pointer
+
+    def test_conversion_device(self):
+        orig_array = _gen_array("float32")
+
+        # If the device is identical, then we support it:
+        capsule = orig_array.__dlpack__(
+            dl_device=orig_array.__dlpack_device__()
+        )
+        out_array = cupy.from_dlpack(
+            DLDummy(capsule, orig_array.__dlpack_device__())
+        )
+
+        testing.assert_array_equal(orig_array, out_array)
+        assert orig_array.get_array()._pointer == out_array.get_array()._pointer
+
+    @pytest.mark.skip("no BufferError exception for bad device")
+    def test_conversion_bad_device(self):
+        arr = _gen_array("float32")
+
+        # invalid device ID
+        with pytest.raises(BufferError):
+            arr.__dlpack__(dl_device=(arr.__dlpack_device__()[0], 2**30))
+
+        # Simple, non-matching device:
+        with pytest.raises(BufferError):
+            arr.__dlpack__(dl_device=(9, 0))
+
+    @pytest.mark.skip("conversion to CPU is not supported")
+    def test_conversion_device_to_cpu(self):
+        # NOTE: This defaults to the old unversioned protocol, which is
+        # needed for NumPy 1.x support.
+        # If (and only if) the memory is managed, we also support exporting
+        # to CPU.
+        orig_array = _gen_array("float32")
+
+        arr1 = numpy.from_dlpack(
+            DLDummy(orig_array.__dlpack__(dl_device=(1, 0)), device=(1, 0))
+        )
+        arr2 = numpy.from_dlpack(
+            DLDummy(orig_array.__dlpack__(dl_device=(1, 0)), device=(1, 0))
+        )
+
+        numpy.testing.assert_array_equal(orig_array.get(), arr1)
+        assert orig_array.dtype == arr1.dtype
+        # Arrays share the same memory exactly when memory is managed.
+        assert numpy.may_share_memory(arr1, arr2) == (self.memory == "managed")
+
+        arr_copy = numpy.from_dlpack(
+            DLDummy(
+                orig_array.__dlpack__(dl_device=(1, 0), copy=True),
+                device=(1, 0),
+            )
+        )
+        # The memory must not be shared with a copy=True request.
+        assert not numpy.may_share_memory(arr_copy, arr1)
+        numpy.testing.assert_array_equal(arr1, arr_copy)
+
+        # Also test copy=False
+        if self.memory != "managed":
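+            # Presumably only managed memory is host-accessible, so a
+            # zero-copy (copy=False) CPU export must raise here.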
+            with pytest.raises(ValueError):
+                orig_array.__dlpack__(dl_device=(1, 0), copy=False)
+        else:
+            arr_nocopy = numpy.from_dlpack(
+                DLDummy(
+                    orig_array.__dlpack__(dl_device=(1, 0), copy=False),
+                    device=(1, 0),
+                )
+            )
+            assert numpy.may_share_memory(arr_nocopy, arr1)
+
     def test_stream(self):
         allowed_streams = ["null", True]

@@ -73,48 +215,61 @@ def test_stream(self):
         )


-class TestDLTensorMemory(unittest.TestCase):
-    # def setUp(self):
-    #     self.old_pool = cupy.get_default_memory_pool()
-    #     self.pool = cupy.cuda.MemoryPool()
-    #     cupy.cuda.set_allocator(self.pool.malloc)
+class TestDLTensorMemory:
+
+    @pytest.fixture
+    def pool(self):
+        pass
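+        # Pool accounting is currently disabled; the commented-out lines
+        # below preserve the original memory-pool setup for reference.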
+
+        # old_pool = cupy.get_default_memory_pool()
+        # pool = cupy.cuda.MemoryPool()
+        # cupy.cuda.set_allocator(pool.malloc)
+
+        # yield pool

-    # def tearDown(self):
-    #     self.pool.free_all_blocks()
-    #     cupy.cuda.set_allocator(self.old_pool.malloc)
+        # pool.free_all_blocks()
+        # cupy.cuda.set_allocator(old_pool.malloc)

-    def test_deleter(self):
+    @pytest.mark.parametrize("max_version", [None, (1, 0)])
+    def test_deleter(self, pool, max_version):
         # memory is freed when tensor is deleted, as it's not consumed
         array = cupy.empty(10)
-        tensor = array.__dlpack__()
+        tensor = array.__dlpack__(max_version=max_version)
         # str(tensor): <capsule object "dltensor" at 0x7f7c4c835330>
-        assert '"dltensor"' in str(tensor)
-        # assert self.pool.n_free_blocks() == 0
+        name = "dltensor" if max_version is None else "dltensor_versioned"
+        assert f'"{name}"' in str(tensor)
+        # assert pool.n_free_blocks() == 0
         # del array
-        # assert self.pool.n_free_blocks() == 0
+        # assert pool.n_free_blocks() == 0
         # del tensor
-        # assert self.pool.n_free_blocks() == 1
+        # assert pool.n_free_blocks() == 1

-    def test_deleter2(self):
+    @pytest.mark.parametrize("max_version", [None, (1, 0)])
+    def test_deleter2(self, pool, max_version):
         # memory is freed when array2 is deleted, as tensor is consumed
         array = cupy.empty(10)
-        tensor = array.__dlpack__()
-        assert '"dltensor"' in str(tensor)
-        array2 = dlp.from_dlpack_capsule(tensor)
-        assert '"used_dltensor"' in str(tensor)
-        # assert self.pool.n_free_blocks() == 0
+        tensor = array.__dlpack__(max_version=max_version)
+        name = "dltensor" if max_version is None else "dltensor_versioned"
+        assert f'"{name}"' in str(tensor)
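+        # from_dlpack() expects an object implementing __dlpack__ and
+        # __dlpack_device__, so wrap the raw capsule in DLDummy.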
+        array2 = cupy.from_dlpack(
+            DLDummy(tensor, device=array.__dlpack_device__())
+        )
+        assert f'"used_{name}"' in str(tensor)
+        # assert pool.n_free_blocks() == 0
         # del array
-        # assert self.pool.n_free_blocks() == 0
+        # assert pool.n_free_blocks() == 0
         # del array2
-        # assert self.pool.n_free_blocks() == 1
+        # assert pool.n_free_blocks() == 1
         # del tensor
-        # assert self.pool.n_free_blocks() == 1
+        # assert pool.n_free_blocks() == 1

+    @pytest.mark.skip("toDlpack() and fromDlpack() are not supported")
+    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
     def test_multiple_consumption_error(self):
         # Prevent segfault, see #3611
         array = cupy.empty(10)
-        tensor = array.__dlpack__()
-        array2 = dlp.from_dlpack_capsule(tensor)
+        tensor = array.toDlpack()
+        array2 = cupy.fromDlpack(tensor)
         with pytest.raises(ValueError) as e:
-            array3 = dlp.from_dlpack_capsule(tensor)
+            array3 = cupy.fromDlpack(tensor)
         assert "consumed multiple times" in str(e.value)