diff --git a/dpnp/tests/third_party/cupy/core_tests/test_include.py b/dpnp/tests/third_party/cupy/core_tests/test_include.py
index 1119b13914e2..aca13ff8f667 100644
--- a/dpnp/tests/third_party/cupy/core_tests/test_include.py
+++ b/dpnp/tests/third_party/cupy/core_tests/test_include.py
@@ -64,7 +64,7 @@ def _get_cuda_archs(self):
 
     def _get_options(self):
         return (
-            "-std=c++14",
+            "-std=c++17",
             *cupy._core.core._get_cccl_include_options(),
             "-I{}".format(cupy._core.core._get_header_dir_path()),
             "-I{}".format(os.path.join(cupy.cuda.get_cuda_path(), "include")),
diff --git a/dpnp/tests/third_party/cupy/core_tests/test_raw.py b/dpnp/tests/third_party/cupy/core_tests/test_raw.py
index 4885e89801c9..480f6de6ae45 100644
--- a/dpnp/tests/third_party/cupy/core_tests/test_raw.py
+++ b/dpnp/tests/third_party/cupy/core_tests/test_raw.py
@@ -397,24 +397,8 @@ def find_nvcc_ver():
     return int(major) * 1000 + int(minor) * 10
 
 
-@testing.parameterize(
-    # First test NVRTC
-    {"backend": "nvrtc", "in_memory": False},
-    # this run will read from in-memory cache
-    {"backend": "nvrtc", "in_memory": True},
-    # this run will force recompilation
-    {"backend": "nvrtc", "in_memory": True, "clean_up": True},
-    # Below is the same set of NVRTC tests, with Jitify turned on. For tests
-    # that can already pass, it shouldn't matter whether Jitify is on or not,
-    # and the only side effect is to add overhead. It doesn't make sense to
-    # test NVCC + Jitify.
-    {"backend": "nvrtc", "in_memory": False, "jitify": True},
-    {"backend": "nvrtc", "in_memory": True, "jitify": True},
-    {"backend": "nvrtc", "in_memory": True, "clean_up": True, "jitify": True},
-    # Finally, we test NVCC
-    {"backend": "nvcc", "in_memory": False},
-)
-class TestRaw(unittest.TestCase):
+# TODO(leofang): Further refactor the test suite to avoid using unittest?
+class _TestRawBase:
 
     _nvcc_ver = None
     _nvrtc_ver = None
@@ -1176,6 +1160,37 @@ def test_compile_module(self):
         assert cupy.allclose(y, x1 + x2)
 
 
+@testing.parameterize(
+    # First test NVRTC
+    {"backend": "nvrtc", "in_memory": False},
+    # this run will read from in-memory cache
+    {"backend": "nvrtc", "in_memory": True},
+    # this run will force recompilation
+    {"backend": "nvrtc", "in_memory": True, "clean_up": True},
+    # Finally, we test NVCC
+    {"backend": "nvcc", "in_memory": False},
+)
+class TestRaw(_TestRawBase, unittest.TestCase):
+    pass
+
+
+# Recent CCCL has made Jitify cold-launch very slow, see the discussion
+# starting https://github.com/cupy/cupy/pull/8899#issuecomment-2613022424.
+# TODO(leofang): Further refactor the test suite?
+@testing.parameterize(
+    # Below is the same set of NVRTC tests, with Jitify turned on. For tests
+    # that can already pass, it shouldn't matter whether Jitify is on or not,
+    # and the only side effect is to add overhead. It doesn't make sense to
+    # test NVCC + Jitify.
+    {"backend": "nvrtc", "in_memory": False, "jitify": True},
+    {"backend": "nvrtc", "in_memory": True, "jitify": True},
+    {"backend": "nvrtc", "in_memory": True, "clean_up": True, "jitify": True},
+)
+@testing.slow
+class TestRawWithJitify(_TestRawBase, unittest.TestCase):
+    pass
+
+
 _test_grid_sync = r"""
 #include <cooperative_groups.h>
 
@@ -1374,6 +1389,9 @@ def test_raw_picklable(self):
 """
 
 
+# Recent CCCL has made Jitify cold-launch very slow, see the discussion
+# starting https://github.com/cupy/cupy/pull/8899#issuecomment-2613022424.
+# TODO(leofang): Further refactor the test suite?
 @testing.parameterize(
     *testing.product(
         {
@@ -1382,6 +1400,7 @@ def test_raw_picklable(self):
     )
 )
 @unittest.skipIf(cupy.cuda.runtime.is_hip, "Jitify does not support ROCm/HIP")
+@testing.slow
 class TestRawJitify(unittest.TestCase):
 
     def setUp(self):
diff --git a/dpnp/tests/third_party/cupy/fft_tests/test_fft.py b/dpnp/tests/third_party/cupy/fft_tests/test_fft.py
index e7c4fb09364f..1f92d3f21974 100644
--- a/dpnp/tests/third_party/cupy/fft_tests/test_fft.py
+++ b/dpnp/tests/third_party/cupy/fft_tests/test_fft.py
@@ -889,7 +889,7 @@ def test_ifftn_orders(self, dtype, enable_nd):
             pass
 
 
-# @testing.with_requires("numpy>=2.0")
+@testing.with_requires("numpy>=2.0")
 @pytest.mark.usefixtures("skip_forward_backward")
 @testing.parameterize(
     *testing.product(
@@ -925,17 +925,6 @@ def test_rfft(self, xp, dtype):
     def test_irfft(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype)
         out = xp.fft.irfft(a, n=self.n, norm=self.norm)
-
-        if dtype == xp.float16 and xp is cupy:
-            # XXX: np2.0: f16 dtypes differ
-            out = out.astype(np.float16)
-        elif (
-            xp is np
-            and np.lib.NumpyVersion(np.__version__) < "2.0.0"
-            and dtype == np.float32
-        ):
-            out = out.astype(np.float32)
-
         return out
 
 
@@ -1008,7 +997,7 @@ def test_rfft_error_on_wrong_plan(self, dtype):
         assert "Target array size does not match the plan." in str(ex.value)
 
 
-# @testing.with_requires("numpy>=2.0")
+@testing.with_requires("numpy>=2.0")
 @pytest.mark.usefixtures("skip_forward_backward")
 @testing.parameterize(
     *(
@@ -1069,13 +1058,6 @@ def test_irfft2(self, xp, dtype, order, enable_nd):
 
         if self.s is None and self.axes in [None, (-2, -1)]:
             pytest.skip("Input is not Hermitian Symmetric")
-        elif dtype == xp.float16 and xp is cupy:
-            pytest.xfail("XXX: np2.0: f16 dtypes differ")
-        elif (
-            np.lib.NumpyVersion(np.__version__) < "2.0.0"
-            and dtype == np.float32
-        ):
-            pytest.skip("dtypes differ")
 
         a = testing.shaped_random(self.shape, xp, dtype)
         if order == "F":
@@ -1105,7 +1087,7 @@ def test_irfft2(self, dtype):
            xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
 
 
-# @testing.with_requires("numpy>=2.0")
+@testing.with_requires("numpy>=2.0")
 @pytest.mark.usefixtures("skip_forward_backward")
 @testing.parameterize(
     *(
@@ -1166,13 +1148,6 @@ def test_irfftn(self, xp, dtype, order, enable_nd):
 
         if self.s is None and self.axes in [None, (-2, -1)]:
             pytest.skip("Input is not Hermitian Symmetric")
-        elif dtype == xp.float16 and xp is cupy:
-            pytest.xfail("XXX: np2.0: f16 dtypes differ")
-        elif (
-            np.lib.NumpyVersion(np.__version__) < "2.0.0"
-            and dtype == np.float32
-        ):
-            pytest.skip("dtypes differ")
 
         a = testing.shaped_random(self.shape, xp, dtype)
         if order == "F":
@@ -1243,10 +1218,6 @@ def test_rfftn(self, xp, dtype, enable_nd):
     def test_irfftn(self, xp, dtype, enable_nd):
         assert config.enable_nd_planning == enable_nd
         a = testing.shaped_random(self.shape, xp, dtype)
-
-        if dtype == xp.float16 and xp is cupy:
-            pytest.xfail("XXX: np2.0: f16 dtypes differ")
-
         if xp is np:
             return xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
 
@@ -1349,7 +1320,7 @@ def test_irfftn(self, dtype):
            xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
 
 
-# @testing.with_requires("numpy>=2.0")
+@testing.with_requires("numpy>=2.0")
 @pytest.mark.usefixtures("skip_forward_backward")
 @testing.parameterize(
     *testing.product(
@@ -1373,17 +1344,6 @@ class TestHfft:
     def test_hfft(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype)
         out = xp.fft.hfft(a, n=self.n, norm=self.norm)
-
-        if dtype == xp.float16 and xp is cupy:
-            # XXX: np2.0: f16 dtypes differ
-            out = out.astype(np.float16)
-        elif (
-            xp is np
-            and np.lib.NumpyVersion(np.__version__) < "2.0.0"
-            and dtype == np.float32
-        ):
-            out = out.astype(np.float32)
-
         return out
 
     @testing.for_all_dtypes(no_complex=True)
@@ -1396,16 +1356,7 @@ def test_hfft(self, xp, dtype):
     )
     def test_ihfft(self, xp, dtype):
         a = testing.shaped_random(self.shape, xp, dtype)
-        out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
-
-        if (
-            xp is np
-            and np.lib.NumpyVersion(np.__version__) < "2.0.0"
-            and dtype == np.float32
-        ):
-            out = out.astype(np.complex64)
-
-        return out
+        return xp.fft.ihfft(a, n=self.n, norm=self.norm)
 
 
 # @testing.with_requires("numpy>=2.0")
diff --git a/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py b/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py
index 3c7103827cd8..14752c45be22 100644
--- a/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py
+++ b/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py
@@ -757,9 +757,6 @@ def test_polyval(self, xp, dtype):
         a1 = self._get_input(xp, self.type_l, dtype, size=5)
         a2 = self._get_input(xp, self.type_r, dtype, size=5)
-        if self.type_r == "python_scalar":
-            pytest.skip("XXX: np2.0: numpy always returns f64")
-
         return xp.polyval(a1, a2)
diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py
index 134001450ed5..9955cf3c5222 100644
--- a/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py
+++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py
@@ -387,18 +387,17 @@ def test_trim_back_zeros(self, xp, dtype):
         a = xp.array([1, 0, 2, 3, 0, 5, 0, 0, 0], dtype=dtype)
         return xp.trim_zeros(a, trim=self.trim)
 
-    @pytest.mark.skip("0-d array is supported")
+    @testing.with_requires("numpy>=2.2.0")
     @testing.for_all_dtypes()
-    def test_trim_zero_dim(self, dtype):
-        for xp in (numpy, cupy):
-            a = testing.shaped_arange((), xp, dtype)
-            with pytest.raises(TypeError):
-                xp.trim_zeros(a, trim=self.trim)
+    @testing.numpy_cupy_array_equal()
+    def test_trim_zero_dim(self, xp, dtype):
+        a = testing.shaped_arange((), xp, dtype)
+        return xp.trim_zeros(a, trim=self.trim)
 
-    @pytest.mark.skip("nd array is supported")
+    # @pytest.mark.xfail(reason='XXX: Not implemented')
+    @testing.with_requires("numpy>=2.2.0")
     @testing.for_all_dtypes()
-    def test_trim_ndim(self, dtype):
-        for xp in (numpy, cupy):
-            a = testing.shaped_arange((2, 3), xp, dtype=dtype)
-            with pytest.raises(ValueError):
-                xp.trim_zeros(a, trim=self.trim)
+    @testing.numpy_cupy_array_equal()
+    def test_trim_ndim(self, xp, dtype):
+        a = testing.shaped_arange((2, 3), xp, dtype=dtype)
+        return xp.trim_zeros(a, trim=self.trim)
diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py
index 4650e5130f1e..59f598c17ad9 100644
--- a/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py
+++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py
@@ -199,10 +199,6 @@ def test_copyto_multigpu_noncontinguous(self, dtype):
         testing.assert_array_equal(expected, dst.get())
 
 
-@pytest.mark.skipif(
-    numpy.__version__ < "2",
-    reason="XXX: NP2.0: copyto is in flux in numpy 2.0.0rc2",
-)
 @testing.parameterize(
     *testing.product(
         {
diff --git a/dpnp/tests/third_party/cupy/math_tests/test_special.py b/dpnp/tests/third_party/cupy/math_tests/test_special.py
index 49b9713fd480..c89718435cba 100644
--- a/dpnp/tests/third_party/cupy/math_tests/test_special.py
+++ b/dpnp/tests/third_party/cupy/math_tests/test_special.py
@@ -1,7 +1,5 @@
 import unittest
 
-import pytest
-
 import dpnp as cupy
 from dpnp.tests.third_party.cupy import testing
 
@@ -14,16 +12,10 @@ def test_i0(self, xp, dtype):
         a = testing.shaped_random((2, 3), xp, dtype)
         return xp.i0(a)
 
+    @testing.with_requires("numpy>=2.0")
     @testing.for_dtypes(["e", "f", "d", "F", "D"])
     @testing.numpy_cupy_allclose(atol=1e-3)
     def test_sinc(self, xp, dtype):
-
-        if dtype in [cupy.float16, cupy.float32, cupy.complex64]:
-            pytest.xfail(
-                reason="XXX: np2.0: numpy 1.26 uses a wrong "
-                "promotion; numpy 2.0 is OK"
-            )
-
         a = testing.shaped_random((2, 3), xp, dtype, scale=1)
         return xp.sinc(a)
 
diff --git a/dpnp/tests/third_party/cupy/random_tests/test_generator.py b/dpnp/tests/third_party/cupy/random_tests/test_generator.py
index 7f8ca2f60cbf..2aa5aba73b4a 100644
--- a/dpnp/tests/third_party/cupy/random_tests/test_generator.py
+++ b/dpnp/tests/third_party/cupy/random_tests/test_generator.py
@@ -914,7 +914,7 @@ def test_dtype_shape(self):
         if isinstance(self.a, numpy.ndarray):
             expected_dtype = "float"
         else:
-            expected_dtype = "int"
+            expected_dtype = "long"
         assert v.dtype == expected_dtype
         assert v.shape == expected_shape
diff --git a/dpnp/tests/third_party/cupy/sorting_tests/test_search.py b/dpnp/tests/third_party/cupy/sorting_tests/test_search.py
index 24fc17f5204e..70b076ce1aaa 100644
--- a/dpnp/tests/third_party/cupy/sorting_tests/test_search.py
+++ b/dpnp/tests/third_party/cupy/sorting_tests/test_search.py
@@ -328,6 +328,31 @@ def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
         return xp.where(cond, x, y)
 
 
+@testing.with_requires("numpy>=2.0")
+@testing.parameterize(
+    {"scalar_value": 1},
+    {"scalar_value": 1.0},
+    {"scalar_value": 1 + 2j},
+)
+class TestWhereArrayAndScalar:
+
+    @testing.for_all_dtypes()
+    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
+    def test_where_array_scalar(self, xp, dtype):
+        cond = testing.shaped_random((2, 3, 4), xp, xp.bool_)
+        x = testing.shaped_random((2, 3, 4), xp, dtype, seed=0)
+        y = self.scalar_value
+        return xp.where(cond, x, y)
+
+    @testing.for_all_dtypes()
+    @testing.numpy_cupy_allclose(type_check=has_support_aspect64())
+    def test_where_scalar_array(self, xp, dtype):
+        cond = testing.shaped_random((2, 3, 4), xp, xp.bool_)
+        x = self.scalar_value
+        y = testing.shaped_random((2, 3, 4), xp, dtype, seed=0)
+        return xp.where(cond, x, y)
+
+
 @testing.parameterize(
     {"cond_shape": (2, 3, 4)},
     {"cond_shape": (4,)},
diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py b/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py
index 960d7cb07b79..16f96417ab3c 100644
--- a/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py
+++ b/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py
@@ -497,7 +497,6 @@ def test_histogramdd(self, xp, dtype):
             weights = xp.ones((x.shape[0],), dtype=self.weights_dtype)
         else:
             weights = None
-
         y, bin_edges = xp.histogramdd(
             x,
             bins=bins,
@@ -549,6 +548,13 @@ def test_histogramdd_invalid_range(self):
         with pytest.raises(ValueError):
             y, bin_edges = xp.histogramdd(x, range=r)
 
+    @pytest.mark.skip("list of bins is allowed")
+    def test_histogramdd_disallow_arraylike_bins(self):
+        x = testing.shaped_random((16, 2), cupy, scale=100)
+        bins = [[0, 10, 20, 50, 90]] * 2  # too many dimensions
+        with pytest.raises(ValueError):
+            y, bin_edges = cupy.histogramdd(x, bins=bins)
+
 
 @testing.parameterize(
     *testing.product(
@@ -596,9 +602,10 @@ def test_histogram2d(self, xp, dtype):
 
 class TestHistogram2dErrors(unittest.TestCase):
 
+    @pytest.mark.skip("list of bins is allowed")
     def test_histogram2d_disallow_arraylike_bins(self):
         x = testing.shaped_random((16,), cupy, scale=100)
         y = testing.shaped_random((16,), cupy, scale=100)
         bins = [0, 10, 20, 50, 90]
         with pytest.raises(ValueError):
-            y, bin_edges = cupy.histogram2d(x, y, bins=bins)
+            y, _, _ = cupy.histogram2d(x, y, bins=bins)
diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_order.py b/dpnp/tests/third_party/cupy/statistics_tests/test_order.py
index 2dbbb2ace738..b990c1eeb2c7 100644
--- a/dpnp/tests/third_party/cupy/statistics_tests/test_order.py
+++ b/dpnp/tests/third_party/cupy/statistics_tests/test_order.py
@@ -252,6 +252,13 @@ def test_quantile_out_of_range_q(self, dtype, method):
         with pytest.raises(ValueError):
             xp.quantile(a, q, axis=-1, method=method)
 
+    @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
+    @testing.numpy_cupy_allclose(rtol=1e-6)
+    def test_quantile_axis_and_keepdims(self, xp, dtype, method):
+        a = testing.shaped_random((1, 6, 3, 2), xp, dtype)
+        q = testing.shaped_random((5,), xp, scale=1)
+        return xp.quantile(a, q, axis=0, keepdims=True, method=method)
+
 
 class TestOrder: