@@ -88,13 +88,6 @@ def test_richcmp(self):
         with self.assertRaises(TypeError):
             a = cid2 > "asdfasfa"
 
-    def test_as_buffer(self):
-        a = np.asarray(self.cid)
-        assert np.allclose(a, self.cid.comm_id)
-        a[:] = [ord(b'a')] * COMM_ID_BYTES
-        assert np.allclose(a, self.cid.comm_id)
-
-
 @unittest.skipUnless(MPI_IMPORTED, "Needs mpi4py module")
 @unittest.skipIf(get_user_gpu_rank() == -1, "Collective operations supported on CUDA devices only")
 class TestGpuComm(unittest.TestCase):
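
Note: the deleted test_as_buffer relied on the comm id exposing the buffer protocol, so np.asarray(self.cid) viewed the id bytes in place and a write through the NumPy array was expected to show up in self.cid.comm_id. A minimal stand-in for that pattern, using a plain bytearray and a hypothetical COMM_ID_BYTES = 8 (np.frombuffer is used because it gives the zero-copy view for plain Python buffers):

    import numpy as np

    COMM_ID_BYTES = 8                          # hypothetical size for this sketch
    buf = bytearray(COMM_ID_BYTES)             # stands in for the comm-id storage
    view = np.frombuffer(buf, dtype=np.uint8)  # zero-copy view via the buffer protocol
    view[:] = [ord(b'a')] * COMM_ID_BYTES      # write through the view...
    assert buf == b'a' * COMM_ID_BYTES         # ...and the underlying buffer changes
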
@@ -293,19 +286,19 @@ def test_all_gather(self):
 
         a = cpu.reshape((5, 2), order='F')
         exp = texp.reshape((5, 2 * self.size), order='F')
-        gpu = gpuarray.asarray(a, context=self.ctx)
+        gpu = gpuarray.asarray(a, context=self.ctx, order='F')
         resgpu = self.gpucomm.all_gather(gpu, nd_up=0)
         check_all(resgpu, exp)
 
         a = cpu.reshape((5, 2), order='F')
         exp = texp.reshape((5, 2, self.size), order='F')
-        gpu = gpuarray.asarray(a, context=self.ctx)
+        gpu = gpuarray.asarray(a, context=self.ctx, order='F')
         resgpu = self.gpucomm.all_gather(gpu, nd_up=1)
         check_all(resgpu, exp)
 
         a = cpu.reshape((5, 2), order='F')
         exp = texp.reshape((5, 2, 1, 1, self.size), order='F')
-        gpu = gpuarray.asarray(a, context=self.ctx)
+        gpu = gpuarray.asarray(a, context=self.ctx, order='F')
         resgpu = self.gpucomm.all_gather(gpu, nd_up=3)
         check_all(resgpu, exp)
 
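
Why the order='F' additions matter: cpu.reshape((5, 2), order='F') is a Fortran-ordered host array, and all_gather concatenates each rank's raw device buffer, so the device copy must keep that memory layout for the Fortran-reshaped expectation to line up. A minimal NumPy sketch of the layout difference (assuming gpuarray.asarray mirrors NumPy's order semantics and would otherwise have produced a C-ordered copy, which is what the fix suggests):

    import numpy as np

    cpu = np.arange(10, dtype='float32')
    a = cpu.reshape((5, 2), order='F')        # Fortran-ordered view of cpu

    c_copy = np.array(a, order='C')           # copy with row-major memory
    f_copy = np.array(a, order='F')           # copy keeping column-major memory

    assert np.array_equal(c_copy, f_copy)     # same logical contents, but...
    assert c_copy.tobytes(order='A') != f_copy.tobytes(order='A')
    # ...the raw buffers differ, so a byte-wise gather of the C-ordered copy
    # would not reproduce texp.reshape((5, 2 * self.size), order='F')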