diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ded4ad9b37ac..8fb177f21990 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,7 +34,8 @@ repos: (?x)^( dpnp/tests/test_arraycreation.py| dpnp/tests/test_sycl_queue.py| - dpnp/tests/test_usm_type.py + dpnp/tests/test_usm_type.py| + dpnp/tests/third_party/cupy/core_tests/test_nep50_examples.py )$ - id: python-no-log-warn - id: python-use-type-annotations diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index 97ba8a0720e0..6456b4b5072a 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -150,7 +150,7 @@ def mT(self): if self.ndim < 2: raise ValueError("matrix transpose with ndim < 2 is undefined") - return self._array_obj.mT + return dpnp_array._create_from_usm_ndarray(self._array_obj.mT) def to_device(self, target_device): """Transfer array to target device.""" diff --git a/dpnp/random/dpnp_iface_random.py b/dpnp/random/dpnp_iface_random.py index 49d2adad2c28..2e4e55af7f18 100644 --- a/dpnp/random/dpnp_iface_random.py +++ b/dpnp/random/dpnp_iface_random.py @@ -1022,25 +1022,31 @@ def power(a, size=None): return call_origin(numpy.random.power, a, size) -def rand(d0, *dn, device=None, usm_type="device", sycl_queue=None): +def rand(*args, device=None, usm_type="device", sycl_queue=None): """ Random values in a given shape. - Create an array of the given shape and populate it with random samples - from a uniform distribution over [0, 1). + Create an array of the given shape and populate it with random samples from + a uniform distribution over ``[0, 1)``. For full documentation refer to :obj:`numpy.random.rand`. Parameters ---------- + *args : sequence of ints, optional + The dimensions of the returned array, must be non-negative. + If no argument is given a single Python float is returned. device : {None, string, SyclDevice, SyclQueue}, optional An array API concept of device where the output array is created. 
- The `device` can be ``None`` (the default), an OneAPI filter selector string, - an instance of :class:`dpctl.SyclDevice` corresponding to a non-partitioned SYCL device, - an instance of :class:`dpctl.SyclQueue`, or a `Device` object returned by + The `device` can be ``None`` (the default), an OneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of :class:`dpctl.SyclQueue`, + or a `Device` object returned by :obj:`dpnp.dpnp_array.dpnp_array.device` property. + Default: ``None``. usm_type : {"device", "shared", "host"}, optional The type of SYCL USM allocation for the output array. + Default: ``"device"``. sycl_queue : {None, SyclQueue}, optional A SYCL queue to use for output array allocation and copying. The `sycl_queue` can be passed as ``None`` (the default), which means @@ -1051,23 +1057,27 @@ def rand(d0, *dn, device=None, usm_type="device", sycl_queue=None): Returns ------- out : dpnp.ndarray - Random values in a given shape. - Output array data type is :obj:`dpnp.float64` if device supports it, or :obj:`dpnp.float32` otherwise. + Random values in a given shape ``(d0, d1, ..., dn)``. + Output array data type is :obj:`dpnp.float64` if a device supports it, + or :obj:`dpnp.float32` type otherwise. - Examples + See Also -------- - >>> s = dpnp.random.rand(3, 2) + :obj:`dpnp.random.random` : Return random floats in the half-open interval + ``[0.0, 1.0)``. + :obj:`dpnp.random.random_sample` : Return random floats in the half-open + interval ``[0.0, 1.0)``. + :obj:`dpnp.random.uniform` : Draw samples from a uniform distribution. 
- See Also + Examples -------- - :obj:`dpnp.random.random` - :obj:`dpnp.random.random_sample` - :obj:`dpnp.random.uniform` + >>> import dpnp as np + >>> s = np.random.rand(3, 2) """ rs = _get_random_state(device=device, sycl_queue=sycl_queue) - return rs.rand(d0, *dn, usm_type=usm_type) + return rs.rand(*args, usm_type=usm_type) def randint( diff --git a/dpnp/tests/skipped_tests.tbl b/dpnp/tests/skipped_tests.tbl index db1beb0c6fc9..7a7e140972dd 100644 --- a/dpnp/tests/skipped_tests.tbl +++ b/dpnp/tests/skipped_tests.tbl @@ -17,333 +17,3 @@ tests/test_umath.py::test_umaths[('divmod', 'ff')] tests/test_umath.py::test_umaths[('divmod', 'dd')] tests/test_umath.py::test_umaths[('frexp', 'f')] tests/test_umath.py::test_umaths[('frexp', 'd')] - -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_0_{shape=()}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_1_{shape=(1,)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_2_{shape=(2, 3)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_3_{order='C', shape=(2, 3)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_4_{order='F', shape=(2, 3)}::test_item - -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_copied -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_transposed - -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_min 
-tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_1_{order='C', shape=(10, 20)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_1_{order='C', shape=(10, 20)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_2_{order='C', shape=(10, 20, 30)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_2_{order='C', shape=(10, 20, 30)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_3_{order='C', shape=(10, 20, 30, 40)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_3_{order='C', shape=(10, 20, 30, 40)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_4_{order='F', shape=(10,)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_4_{order='F', shape=(10,)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_5_{order='F', shape=(10, 20)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_5_{order='F', shape=(10, 20)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_6_{order='F', shape=(10, 20, 30)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_6_{order='F', shape=(10, 20, 30)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 40)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 40)}::test_cub_min - -tests/third_party/cupy/indexing_tests/test_insert.py::TestPutmaskDifferentDtypes::test_putmask_differnt_dtypes_raises 
-tests/third_party/cupy/indexing_tests/test_insert.py::TestPutmask::test_putmask_non_equal_shape_raises - -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_base -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_copy -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_copy_next -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_len -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_scalar 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), 
shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_scalar 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_0_{index=None, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_0_{index=None, shape=(2, 3, 4)}::test_setitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_1_{index=(0,), shape=(2, 3, 4)}::test_getitem 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_1_{index=(0,), shape=(2, 3, 4)}::test_setitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_2_{index=True, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_2_{index=True, shape=(2, 3, 4)}::test_setitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_3_{index=[0], shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_3_{index=[0], shape=(2, 3, 4)}::test_setitem - -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_0_{shapes=[(), ()]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_10_{shapes=[(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_1_{shapes=[(0,), (0,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_2_{shapes=[(1,), (1,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_3_{shapes=[(2,), (2,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_4_{shapes=[(0,), (1,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_5_{shapes=[(2, 3), (1, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_6_{shapes=[(2, 1, 3, 4), (3, 1, 4)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_7_{shapes=[(4, 3, 2, 3), (2, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_8_{shapes=[(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]}::test_broadcast 
-tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_9_{shapes=[(0, 1, 1, 3), (2, 1, 0, 0, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_0_{shapes=[(3,), (2,)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_1_{shapes=[(3, 2), (2, 3)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_2_{shapes=[(3, 2), (3, 4)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_3_{shapes=[(0,), (2,)]}::test_invalid_broadcast - -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_period -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_left_right -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_fy -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_fx -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_x -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_fy -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_fx -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_x -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_size1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_to_nan - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_0_{a_shape=(), b_shape=(), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_1_{a_shape=(), b_shape=(), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_2_{a_shape=(), b_shape=(3, 2), shape=(4, 3, 2)}::test_beta 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_3_{a_shape=(), b_shape=(3, 2), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_4_{a_shape=(3, 2), b_shape=(), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_5_{a_shape=(3, 2), b_shape=(), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_1_{scale_shape=(), shape=(3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_2_{scale_shape=(), shape=None}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_0_{p_shape=(), shape=(4, 3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_1_{p_shape=(), shape=(3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_2_{p_shape=(3, 2), shape=(4, 3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_3_{p_shape=(3, 2), shape=(3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_0_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_1_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_2_{nbad_shape=(), ngood_shape=(), 
nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_3_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_4_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_5_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_6_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_7_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_8_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_9_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_10_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_11_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_12_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_13_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_14_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_15_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_16_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_17_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_18_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_19_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_20_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_21_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_22_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_23_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_24_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_25_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_26_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_27_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_28_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_29_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_30_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_31_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_logistic 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLognormal_param_0_{mean_shape=(), shape=(4, 3, 2), sigma_shape=()}::test_lognormal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLognormal_param_1_{mean_shape=(), shape=(3, 2), sigma_shape=()}::test_lognormal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_0_{p_shape=(), shape=(4, 3, 2)}::test_logseries -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_0_{p_shape=(), shape=(4, 3, 2)}::test_logseries_for_invalid_p -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_1_{p_shape=(), shape=(3, 2)}::test_logseries -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_1_{p_shape=(), shape=(3, 2)}::test_logseries_for_invalid_p -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_0_{d=2, shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_1_{d=2, shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_2_{d=4, shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_3_{d=4, shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_0_{n_shape=(), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_0_{n_shape=(), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_1_{n_shape=(), p_shape=(), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_1_{n_shape=(), p_shape=(), shape=(3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_2_{n_shape=(), p_shape=(3, 2), shape=(4, 3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_2_{n_shape=(), p_shape=(3, 2), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_3_{n_shape=(), p_shape=(3, 2), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_3_{n_shape=(), p_shape=(3, 2), shape=(3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_4_{n_shape=(3, 2), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_4_{n_shape=(3, 2), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_5_{n_shape=(3, 2), p_shape=(), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_5_{n_shape=(3, 2), p_shape=(), shape=(3, 2)}::test_negative_binomial_for_noninteger_n 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_0_{df_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_1_{df_shape=(), nonc_shape=(), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_2_{df_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_3_{df_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_4_{df_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_5_{df_shape=(3, 2), nonc_shape=(), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_0_{dfden_shape=(), dfnum_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_10_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_11_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_12_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_13_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(), shape=(3, 
2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_1_{dfden_shape=(), dfnum_shape=(), nonc_shape=(), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_2_{dfden_shape=(), dfnum_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_3_{dfden_shape=(), dfnum_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_4_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_5_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_6_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_7_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_8_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_9_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(), shape=(3, 2)}::test_noncentral_f - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_0_{lam_shape=(), shape=(4, 3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_1_{lam_shape=(), shape=(3, 2)}::test_poisson 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_2_{lam_shape=(3, 2), shape=(4, 3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_3_{lam_shape=(3, 2), shape=(3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_0_{a_shape=(), shape=(4, 3, 2)}::test_power -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_0_{a_shape=(), shape=(4, 3, 2)}::test_power_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_1_{a_shape=(), shape=(3, 2)}::test_power -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_1_{a_shape=(), shape=(3, 2)}::test_power_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh_for_negative_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh_for_negative_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_0_{shape=(4, 3, 2), shape_shape=()}::test_standard_gamma 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_2_{shape=(3, 2), shape_shape=()}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_0_{left_shape=(), mode_shape=(), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_10_{left_shape=(3, 2), mode_shape=(), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_11_{left_shape=(3, 2), mode_shape=(), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_12_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_13_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_1_{left_shape=(), mode_shape=(), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_2_{left_shape=(), mode_shape=(), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_3_{left_shape=(), mode_shape=(), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_4_{left_shape=(), mode_shape=(3, 2), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_5_{left_shape=(), mode_shape=(3, 2), right_shape=(), shape=(3, 2)}::test_triangular 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_6_{left_shape=(), mode_shape=(3, 2), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_7_{left_shape=(), mode_shape=(3, 2), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_8_{left_shape=(3, 2), mode_shape=(), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_9_{left_shape=(3, 2), mode_shape=(), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_0_{high_shape=(), low_shape=(), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_1_{high_shape=(), low_shape=(), shape=(3, 2)}::test_uniform 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_4_{high_shape=(3, 2), low_shape=(), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_5_{high_shape=(3, 2), low_shape=(), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_0_{mean_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_1_{mean_shape=(), scale_shape=(), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_2_{mean_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_3_{mean_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_4_{mean_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_5_{mean_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull_for_inf_a 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsZipf_param_0_{a_shape=(), shape=(4, 3, 2)}::test_zipf -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsZipf_param_1_{a_shape=(), shape=(3, 2)}::test_zipf - -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_no_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_p_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_replace_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_replace_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_replace_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_replace_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_0_{size=None}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_1_{size=()}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_2_{size=4}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_3_{size=(0,)}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_4_{size=(1, 0)}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_bound_float1 -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit_2 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_1 
-tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_2 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit_2 - -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[linear] 
-tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_unxpected_method diff --git 
a/dpnp/tests/skipped_tests_gpu.tbl b/dpnp/tests/skipped_tests_gpu.tbl index 797f1ecf86c3..7eddf6a27297 100644 --- a/dpnp/tests/skipped_tests_gpu.tbl +++ b/dpnp/tests/skipped_tests_gpu.tbl @@ -25,335 +25,4 @@ tests/test_umath.py::test_umaths[('floor_divide', 'ff')] tests/test_umath.py::test_umaths[('frexp', 'f')] tests/test_umath.py::test_umaths[('frexp', 'd')] -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_2_{p_shape=(3, 2), shape=(4, 3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_3_{p_shape=(3, 2), shape=(3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_0_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_1_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_2_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_3_{nbad_shape=(), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_4_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_5_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_6_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_7_{nbad_shape=(), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_8_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_9_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_10_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_11_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_12_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_13_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_14_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_15_{nbad_shape=(), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_16_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_17_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_18_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_19_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_20_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_21_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_22_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_23_{nbad_shape=(3, 2), ngood_shape=(), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_24_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_25_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_26_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_27_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int32, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_28_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_29_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_30_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(4, 3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsHyperGeometric_param_31_{nbad_shape=(3, 2), ngood_shape=(3, 2), nsample_dtype=int64, nsample_shape=(3, 2), shape=(3, 2)}::test_hypergeometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_0_{lam_shape=(), shape=(4, 3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_1_{lam_shape=(), 
shape=(3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_2_{lam_shape=(3, 2), shape=(4, 3, 2)}::test_poisson -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPoisson_param_3_{lam_shape=(3, 2), shape=(3, 2)}::test_poisson - -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_0_{shape=()}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_1_{shape=(1,)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_2_{shape=(2, 3)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_3_{order='C', shape=(2, 3)}::test_item -tests/third_party/cupy/core_tests/test_ndarray_conversion.py::TestNdarrayToBytes_param_4_{order='F', shape=(2, 3)}::test_item - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_0_{d=2, shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_1_{d=2, shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_2_{d=4, shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsMultivariateNormal_param_3_{d=4, shape=(3, 2)}::test_normal - tests/third_party/intel/test_zero_copy_test1.py::test_dpnp_interaction_with_dpctl_memory - -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_copied -tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py::TestArrayFlatten::test_flatten_order_transposed - -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', 
shape=(10,)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_0_{order='C', shape=(10,)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_1_{order='C', shape=(10, 20)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_1_{order='C', shape=(10, 20)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_2_{order='C', shape=(10, 20, 30)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_2_{order='C', shape=(10, 20, 30)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_3_{order='C', shape=(10, 20, 30, 40)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_3_{order='C', shape=(10, 20, 30, 40)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_4_{order='F', shape=(10,)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_4_{order='F', shape=(10,)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_5_{order='F', shape=(10, 20)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_5_{order='F', shape=(10, 20)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_6_{order='F', shape=(10, 20, 30)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_6_{order='F', shape=(10, 20, 30)}::test_cub_min -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 40)}::test_cub_max -tests/third_party/cupy/core_tests/test_ndarray_reduction.py::TestCubReduction_param_7_{order='F', shape=(10, 20, 30, 
40)}::test_cub_min - -tests/third_party/cupy/indexing_tests/test_insert.py::TestPutmaskDifferentDtypes::test_putmask_differnt_dtypes_raises -tests/third_party/cupy/indexing_tests/test_insert.py::TestPutmask::test_putmask_non_equal_shape_raises - -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_base -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_copy -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_copy_next -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiter::test_len -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_0_{index=Ellipsis, shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_ndarray_nd 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_3_{index=slice(None, None, None), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_4_{index=slice(None, 10, None), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_5_{index=slice(None, None, 2), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 
3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_6_{index=slice(None, None, -1), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_7_{index=slice(10, None, -1), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_ndarray_nd 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_8_{index=slice(10, None, -2), shape=(2, 3, 4)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_9_{index=slice(None, None, None), shape=()}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_1d -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_different_types -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_ndarray_nd -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscript_param_10_{index=slice(None, None, None), shape=(10,)}::test_setitem_scalar -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_0_{index=None, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_0_{index=None, shape=(2, 3, 4)}::test_setitem 
-tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_1_{index=(0,), shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_1_{index=(0,), shape=(2, 3, 4)}::test_setitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_2_{index=True, shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_2_{index=True, shape=(2, 3, 4)}::test_setitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_3_{index=[0], shape=(2, 3, 4)}::test_getitem -tests/third_party/cupy/indexing_tests/test_iterate.py::TestFlatiterSubscriptIndexError_param_3_{index=[0], shape=(2, 3, 4)}::test_setitem - -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_0_{shapes=[(), ()]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_1_{shapes=[(0,), (0,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_2_{shapes=[(1,), (1,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_3_{shapes=[(2,), (2,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_4_{shapes=[(0,), (1,)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_5_{shapes=[(2, 3), (1, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_6_{shapes=[(2, 1, 3, 4), (3, 1, 4)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_7_{shapes=[(4, 3, 2, 3), (2, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_8_{shapes=[(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]}::test_broadcast 
-tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_9_{shapes=[(0, 1, 1, 3), (2, 1, 0, 0, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestBroadcast_param_10_{shapes=[(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]}::test_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_0_{shapes=[(3,), (2,)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_1_{shapes=[(3, 2), (2, 3)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_2_{shapes=[(3, 2), (3, 4)]}::test_invalid_broadcast -tests/third_party/cupy/manipulation_tests/test_dims.py::TestInvalidBroadcast_param_3_{shapes=[(0,), (2,)]}::test_invalid_broadcast - -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_period -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_left_right -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_fy -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_fx -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_nan_x -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_fy -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_fx -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_x -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_size1 -tests/third_party/cupy/math_tests/test_misc.py::TestMisc::test_interp_inf_to_nan - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_0_{a_shape=(), b_shape=(), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_1_{a_shape=(), b_shape=(), shape=(3, 2)}::test_beta 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_2_{a_shape=(), b_shape=(3, 2), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_3_{a_shape=(), b_shape=(3, 2), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_4_{a_shape=(3, 2), b_shape=(), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_5_{a_shape=(3, 2), b_shape=(), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_1_{scale_shape=(), shape=(3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_2_{scale_shape=(), shape=None}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_0_{p_shape=(), shape=(4, 3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGeometric_param_1_{p_shape=(), shape=(3, 2)}::test_geometric -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_gumbel 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLognormal_param_0_{mean_shape=(), shape=(4, 3, 2), sigma_shape=()}::test_lognormal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLognormal_param_1_{mean_shape=(), shape=(3, 2), sigma_shape=()}::test_lognormal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_0_{p_shape=(), shape=(4, 3, 2)}::test_logseries -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_0_{p_shape=(), shape=(4, 3, 2)}::test_logseries_for_invalid_p -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_1_{p_shape=(), shape=(3, 
2)}::test_logseries -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogseries_param_1_{p_shape=(), shape=(3, 2)}::test_logseries_for_invalid_p -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_0_{n_shape=(), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_0_{n_shape=(), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_1_{n_shape=(), p_shape=(), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_1_{n_shape=(), p_shape=(), shape=(3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_2_{n_shape=(), p_shape=(3, 2), shape=(4, 3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_2_{n_shape=(), p_shape=(3, 2), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_3_{n_shape=(), p_shape=(3, 2), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_3_{n_shape=(), p_shape=(3, 2), shape=(3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_4_{n_shape=(3, 2), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_4_{n_shape=(3, 2), p_shape=(), shape=(4, 3, 2)}::test_negative_binomial_for_noninteger_n 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_5_{n_shape=(3, 2), p_shape=(), shape=(3, 2)}::test_negative_binomial -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNegativeBinomial_param_5_{n_shape=(3, 2), p_shape=(), shape=(3, 2)}::test_negative_binomial_for_noninteger_n -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_0_{df_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_1_{df_shape=(), nonc_shape=(), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_2_{df_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_3_{df_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_4_{df_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_5_{df_shape=(3, 2), nonc_shape=(), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_0_{dfden_shape=(), dfnum_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_10_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_11_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_12_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_13_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_1_{dfden_shape=(), dfnum_shape=(), nonc_shape=(), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_2_{dfden_shape=(), dfnum_shape=(), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_3_{dfden_shape=(), dfnum_shape=(), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_4_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_5_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_6_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_7_{dfden_shape=(), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_8_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_9_{dfden_shape=(3, 2), dfnum_shape=(), nonc_shape=(), 
shape=(3, 2)}::test_noncentral_f - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_0_{a_shape=(), shape=(4, 3, 2)}::test_power -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_0_{a_shape=(), shape=(4, 3, 2)}::test_power_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_1_{a_shape=(), shape=(3, 2)}::test_power -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPower_param_1_{a_shape=(), shape=(3, 2)}::test_power_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh_for_negative_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_0_{scale_shape=(), shape=(4, 3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh_for_negative_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_1_{scale_shape=(), shape=(3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_0_{shape=(4, 3, 2), shape_shape=()}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_2_{shape=(3, 2), shape_shape=()}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_0_{left_shape=(), mode_shape=(), right_shape=(), shape=(4, 3, 2)}::test_triangular 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_10_{left_shape=(3, 2), mode_shape=(), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_11_{left_shape=(3, 2), mode_shape=(), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_12_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_13_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_1_{left_shape=(), mode_shape=(), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_2_{left_shape=(), mode_shape=(), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_3_{left_shape=(), mode_shape=(), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_4_{left_shape=(), mode_shape=(3, 2), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_5_{left_shape=(), mode_shape=(3, 2), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_6_{left_shape=(), mode_shape=(3, 2), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_7_{left_shape=(), mode_shape=(3, 2), right_shape=(3, 2), shape=(3, 2)}::test_triangular 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_8_{left_shape=(3, 2), mode_shape=(), right_shape=(), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_9_{left_shape=(3, 2), mode_shape=(), right_shape=(), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_0_{high_shape=(), low_shape=(), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_1_{high_shape=(), low_shape=(), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_4_{high_shape=(3, 2), low_shape=(), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_5_{high_shape=(3, 2), low_shape=(), shape=(3, 2)}::test_uniform 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_0_{mean_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_1_{mean_shape=(), scale_shape=(), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_2_{mean_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_3_{mean_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_4_{mean_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_5_{mean_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_0_{a_shape=(), shape=(4, 3, 2)}::test_weibull_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_1_{a_shape=(), shape=(3, 2)}::test_weibull_for_negative_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsZipf_param_0_{a_shape=(), shape=(4, 3, 2)}::test_zipf 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsZipf_param_1_{a_shape=(), shape=(3, 2)}::test_zipf -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_no_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_p_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_replace_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_replace_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_replace_and_p_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_and_replace_are_none -tests/third_party/cupy/random_tests/test_sample.py::TestChoice::test_size_is_none -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_0_{size=None}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_1_{size=()}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_2_{size=4}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_3_{size=(0,)}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestMultinomial_param_4_{size=(1, 0)}::test_multinomial -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_bound_float1 -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit -tests/third_party/cupy/random_tests/test_sample.py::TestRandint2::test_goodness_of_fit_2 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_1 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_bound_2 -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit -tests/third_party/cupy/random_tests/test_sample.py::TestRandomIntegers2::test_goodness_of_fit_2 - 
-tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_defaults[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_q_list[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_no_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_neg_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[higher] 
-tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_tuple_axis[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_scalar_q[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_keepdims[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_out[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[linear] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[lower] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[higher] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_bad_q[midpoint] -tests/third_party/cupy/statistics_tests/test_order.py::TestOrder::test_percentile_unxpected_method diff --git a/dpnp/tests/skipped_tests_gpu_no_fp64.tbl b/dpnp/tests/skipped_tests_gpu_no_fp64.tbl index 7cd2d8a1c152..2b751bbf3a8d 100644 --- a/dpnp/tests/skipped_tests_gpu_no_fp64.tbl +++ 
b/dpnp/tests/skipped_tests_gpu_no_fp64.tbl @@ -1,172 +1 @@ tests/test_umath.py::test_umaths[('floor_divide', 'ff')] - -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_6_{a_shape=(3, 2), b_shape=(3, 2), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_7_{a_shape=(3, 2), b_shape=(3, 2), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_0_{df_shape=(), shape=(4, 3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_1_{df_shape=(), shape=(3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_2_{df_shape=(3, 2), shape=(4, 3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_3_{df_shape=(3, 2), shape=(3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsDirichlet_param_0_{alpha_shape=(3,), shape=(4, 3, 2, 3)}::test_dirichlet -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsDirichlet_param_1_{alpha_shape=(3,), shape=(3, 2, 3)}::test_dirichlet -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_3_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_4_{scale_shape=(3, 2), shape=(3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_5_{scale_shape=(3, 2), shape=None}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_0_{dfden_shape=(), dfnum_shape=(), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_1_{dfden_shape=(), dfnum_shape=(), 
shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_2_{dfden_shape=(), dfnum_shape=(3, 2), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_3_{dfden_shape=(), dfnum_shape=(3, 2), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_4_{dfden_shape=(3, 2), dfnum_shape=(), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_5_{dfden_shape=(3, 2), dfnum_shape=(), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_6_{dfden_shape=(3, 2), dfnum_shape=(3, 2), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_7_{dfden_shape=(3, 2), dfnum_shape=(3, 2), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_0_{scale_shape=(), shape=(4, 3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_1_{scale_shape=(), shape=(4, 3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_2_{scale_shape=(), shape=(3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_3_{scale_shape=(), shape=(3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_4_{scale_shape=(3, 2), shape=(4, 3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_5_{scale_shape=(3, 2), shape=(4, 3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_6_{scale_shape=(3, 2), shape=(3, 2), 
shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_7_{scale_shape=(3, 2), shape=(3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_6_{df_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_7_{df_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_14_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_15_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_3_{loc_shape=(), scale_shape=(3, 2), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_0_{a_shape=(), shape=(4, 3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_1_{a_shape=(), shape=(3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_2_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_rayleigh 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_2_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_3_{scale_shape=(3, 2), shape=(3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_3_{scale_shape=(3, 2), shape=(3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardCauchy_param_0_{shape=(4, 3, 2)}::test_standard_cauchy -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardCauchy_param_1_{shape=(3, 2)}::test_standard_cauchy -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardExponential_param_0_{shape=(4, 3, 2)}::test_standard_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardExponential_param_1_{shape=(3, 2)}::test_standard_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_1_{shape=(4, 3, 2), shape_shape=(3, 2)}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_3_{shape=(3, 2), shape_shape=(3, 2)}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardNormal_param_0_{shape=(4, 3, 2)}::test_standard_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardNormal_param_1_{shape=(3, 2)}::test_standard_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_0_{df_shape=(), shape=(4, 3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_1_{df_shape=(), shape=(3, 2)}::test_standard_t 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_2_{df_shape=(3, 2), shape=(4, 3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_3_{df_shape=(3, 2), shape=(3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_14_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_15_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(3, 2), shape=(3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_2_{high_shape=(), low_shape=(3, 2), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_3_{high_shape=(), low_shape=(3, 2), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_6_{high_shape=(3, 2), low_shape=(3, 2), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_7_{high_shape=(3, 2), low_shape=(3, 2), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_0_{kappa_shape=(), mu_shape=(), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_1_{kappa_shape=(), mu_shape=(), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_2_{kappa_shape=(), mu_shape=(3, 2), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_3_{kappa_shape=(), mu_shape=(3, 2), shape=(3, 2)}::test_vonmises 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_4_{kappa_shape=(3, 2), mu_shape=(), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_5_{kappa_shape=(3, 2), mu_shape=(), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_6_{kappa_shape=(3, 2), mu_shape=(3, 2), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_7_{kappa_shape=(3, 2), mu_shape=(3, 2), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_6_{mean_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_7_{mean_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_6_{a_shape=(3, 2), b_shape=(3, 2), shape=(4, 3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsBeta_param_7_{a_shape=(3, 2), b_shape=(3, 2), shape=(3, 2)}::test_beta -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_0_{df_shape=(), shape=(4, 3, 2)}::test_chisquare 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_1_{df_shape=(), shape=(3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_2_{df_shape=(3, 2), shape=(4, 3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsChisquare_param_3_{df_shape=(3, 2), shape=(3, 2)}::test_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsDirichlet_param_0_{alpha_shape=(3,), shape=(4, 3, 2, 3)}::test_dirichlet -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsDirichlet_param_1_{alpha_shape=(3,), shape=(3, 2, 3)}::test_dirichlet -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_3_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_4_{scale_shape=(3, 2), shape=(3, 2)}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsExponential_param_5_{scale_shape=(3, 2), shape=None}::test_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_0_{dfden_shape=(), dfnum_shape=(), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_1_{dfden_shape=(), dfnum_shape=(), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_2_{dfden_shape=(), dfnum_shape=(3, 2), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_3_{dfden_shape=(), dfnum_shape=(3, 2), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_4_{dfden_shape=(3, 2), dfnum_shape=(), shape=(4, 3, 2)}::test_f 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_5_{dfden_shape=(3, 2), dfnum_shape=(), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_6_{dfden_shape=(3, 2), dfnum_shape=(3, 2), shape=(4, 3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsF_param_7_{dfden_shape=(3, 2), dfnum_shape=(3, 2), shape=(3, 2)}::test_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_0_{scale_shape=(), shape=(4, 3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_1_{scale_shape=(), shape=(4, 3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_2_{scale_shape=(), shape=(3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_3_{scale_shape=(), shape=(3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_4_{scale_shape=(3, 2), shape=(4, 3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_5_{scale_shape=(3, 2), shape=(4, 3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_6_{scale_shape=(3, 2), shape=(3, 2), shape_shape=()}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGamma_param_7_{scale_shape=(3, 2), shape=(3, 2), shape_shape=(3, 2)}::test_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsGumbel_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 
2)}::test_gumbel -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsuLaplace_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_laplace -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsLogistic_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_logistic -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_6_{df_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralChisquare_param_7_{df_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_chisquare -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_14_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(4, 3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNoncentralF_param_15_{dfden_shape=(3, 2), dfnum_shape=(3, 2), nonc_shape=(3, 2), shape=(3, 2)}::test_noncentral_f -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_0_{loc_shape=(), scale_shape=(), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_1_{loc_shape=(), scale_shape=(), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_2_{loc_shape=(), scale_shape=(3, 2), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_3_{loc_shape=(), 
scale_shape=(3, 2), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_4_{loc_shape=(3, 2), scale_shape=(), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_5_{loc_shape=(3, 2), scale_shape=(), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_6_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsNormal_param_7_{loc_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_0_{a_shape=(), shape=(4, 3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_1_{a_shape=(), shape=(3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsPareto_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_pareto -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_2_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_2_{scale_shape=(3, 2), shape=(4, 3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_3_{scale_shape=(3, 2), shape=(3, 2)}::test_rayleigh -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsRayleigh_param_3_{scale_shape=(3, 2), shape=(3, 2)}::test_rayleigh_for_zero_scale -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardCauchy_param_0_{shape=(4, 3, 2)}::test_standard_cauchy 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardCauchy_param_1_{shape=(3, 2)}::test_standard_cauchy -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardExponential_param_0_{shape=(4, 3, 2)}::test_standard_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardExponential_param_1_{shape=(3, 2)}::test_standard_exponential -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_1_{shape=(4, 3, 2), shape_shape=(3, 2)}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardGamma_param_3_{shape=(3, 2), shape_shape=(3, 2)}::test_standard_gamma -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardNormal_param_0_{shape=(4, 3, 2)}::test_standard_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardNormal_param_1_{shape=(3, 2)}::test_standard_normal -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_0_{df_shape=(), shape=(4, 3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_1_{df_shape=(), shape=(3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_2_{df_shape=(3, 2), shape=(4, 3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsStandardT_param_3_{df_shape=(3, 2), shape=(3, 2)}::test_standard_t -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_14_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(3, 2), shape=(4, 3, 2)}::test_triangular -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsTriangular_param_15_{left_shape=(3, 2), mode_shape=(3, 2), right_shape=(3, 2), shape=(3, 2)}::test_triangular 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_2_{high_shape=(), low_shape=(3, 2), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_3_{high_shape=(), low_shape=(3, 2), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_6_{high_shape=(3, 2), low_shape=(3, 2), shape=(4, 3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsUniform_param_7_{high_shape=(3, 2), low_shape=(3, 2), shape=(3, 2)}::test_uniform -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_0_{kappa_shape=(), mu_shape=(), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_1_{kappa_shape=(), mu_shape=(), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_2_{kappa_shape=(), mu_shape=(3, 2), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_3_{kappa_shape=(), mu_shape=(3, 2), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_4_{kappa_shape=(3, 2), mu_shape=(), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_5_{kappa_shape=(3, 2), mu_shape=(), shape=(3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_6_{kappa_shape=(3, 2), mu_shape=(3, 2), shape=(4, 3, 2)}::test_vonmises -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsVonmises_param_7_{kappa_shape=(3, 2), mu_shape=(3, 2), shape=(3, 2)}::test_vonmises 
-tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_6_{mean_shape=(3, 2), scale_shape=(3, 2), shape=(4, 3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWald_param_7_{mean_shape=(3, 2), scale_shape=(3, 2), shape=(3, 2)}::test_wald -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_2_{a_shape=(3, 2), shape=(4, 3, 2)}::test_weibull_for_inf_a -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull -tests/third_party/cupy/random_tests/test_distributions.py::TestDistributionsWeibull_param_3_{a_shape=(3, 2), shape=(3, 2)}::test_weibull_for_inf_a diff --git a/dpnp/tests/test_sycl_queue.py b/dpnp/tests/test_sycl_queue.py index 9b917c29fd27..0041fbff4752 100644 --- a/dpnp/tests/test_sycl_queue.py +++ b/dpnp/tests/test_sycl_queue.py @@ -1079,24 +1079,27 @@ def test_vecdot(device, shape_pair): @pytest.mark.parametrize( - "func, kwargs", + "func, args, kwargs", [ - pytest.param("normal", {"loc": 1.0, "scale": 3.4, "size": (5, 12)}), - pytest.param("rand", {"d0": 20}), + pytest.param("normal", [], {"loc": 1.0, "scale": 3.4, "size": (5, 12)}), + pytest.param("rand", [20], {}), pytest.param( "randint", + [], {"low": 2, "high": 15, "size": (4, 8, 16), "dtype": dpnp.int32}, ), - pytest.param("randn", {"d0": 20}), - pytest.param("random", {"size": (35, 45)}), + pytest.param("randn", [], {"d0": 20}), + pytest.param("random", [], {"size": (35, 45)}), + pytest.param( + "random_integers", [], {"low": -17, "high": 3, "size": (12, 16)} + ), + pytest.param("random_sample", [], {"size": (7, 7)}), + pytest.param("ranf", [], {"size": (10, 7, 12)}), + pytest.param("sample", [], {"size": (7, 9)}), + pytest.param("standard_normal", [], {"size": (4, 4, 8)}), 
pytest.param( - "random_integers", {"low": -17, "high": 3, "size": (12, 16)} + "uniform", [], {"low": 1.0, "high": 2.0, "size": (4, 2, 5)} ), - pytest.param("random_sample", {"size": (7, 7)}), - pytest.param("ranf", {"size": (10, 7, 12)}), - pytest.param("sample", {"size": (7, 9)}), - pytest.param("standard_normal", {"size": (4, 4, 8)}), - pytest.param("uniform", {"low": 1.0, "high": 2.0, "size": (4, 2, 5)}), ], ) @pytest.mark.parametrize( @@ -1105,11 +1108,11 @@ def test_vecdot(device, shape_pair): ids=[device.filter_string for device in valid_devices], ) @pytest.mark.parametrize("usm_type", ["host", "device", "shared"]) -def test_random(func, kwargs, device, usm_type): +def test_random(func, args, kwargs, device, usm_type): kwargs = {**kwargs, "device": device, "usm_type": usm_type} # test with default SYCL queue per a device - res_array = getattr(dpnp.random, func)(**kwargs) + res_array = getattr(dpnp.random, func)(*args, **kwargs) assert device == res_array.sycl_device assert usm_type == res_array.usm_type @@ -1121,7 +1124,7 @@ def test_random(func, kwargs, device, usm_type): kwargs["sycl_queue"] = sycl_queue # test with in-order SYCL queue per a device and passed as argument - res_array = getattr(dpnp.random, func)(**kwargs) + res_array = getattr(dpnp.random, func)(*args, **kwargs) assert usm_type == res_array.usm_type assert_sycl_queue_equal(res_array.sycl_queue, sycl_queue) @@ -2057,7 +2060,6 @@ def test_broadcast_to(device): pytest.param("concatenate", [[1, 2], [3, 4]], [[5, 6]]), pytest.param("dstack", [[1], [2], [3]], [[2], [3], [4]]), pytest.param("hstack", (1, 2, 3), (4, 5, 6)), - pytest.param("row_stack", [[7], [1], [2], [3]], [[2], [3], [9], [4]]), pytest.param("stack", [1, 2, 3], [4, 5, 6]), pytest.param("vstack", [0, 1, 2, 3], [4, 5, 6, 7]), ], diff --git a/dpnp/tests/test_usm_type.py b/dpnp/tests/test_usm_type.py index 7fc2abc7a795..29c9419fe84d 100644 --- a/dpnp/tests/test_usm_type.py +++ b/dpnp/tests/test_usm_type.py @@ -808,7 +808,6 @@ def 
test_broadcast_to(usm_type): pytest.param("concatenate", [[1, 2], [3, 4]], [[5, 6]]), pytest.param("dstack", [[1], [2], [3]], [[2], [3], [4]]), pytest.param("hstack", (1, 2, 3), (4, 5, 6)), - pytest.param("row_stack", [[7], [1], [2], [3]], [[2], [3], [9], [4]]), pytest.param("stack", [1, 2, 3], [4, 5, 6]), pytest.param("vstack", [0, 1, 2, 3], [4, 5, 6, 7]), ], diff --git a/dpnp/tests/third_party/cupy/binary_tests/test_elementwise.py b/dpnp/tests/third_party/cupy/binary_tests/test_elementwise.py index 0dc9a8fbfccc..452314802309 100644 --- a/dpnp/tests/third_party/cupy/binary_tests/test_elementwise.py +++ b/dpnp/tests/third_party/cupy/binary_tests/test_elementwise.py @@ -4,6 +4,7 @@ class TestElementwise(unittest.TestCase): + @testing.for_int_dtypes() @testing.numpy_cupy_array_equal() def check_unary_int(self, name, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/binary_tests/test_packing.py b/dpnp/tests/third_party/cupy/binary_tests/test_packing.py new file mode 100644 index 000000000000..a72a8a558b08 --- /dev/null +++ b/dpnp/tests/third_party/cupy/binary_tests/test_packing.py @@ -0,0 +1,85 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "packbits() and unpackbits() are not supported yet", allow_module_level=True +) + + +class TestPacking(unittest.TestCase): + + @testing.for_int_dtypes() + @testing.numpy_cupy_array_equal() + def check_packbits(self, data, xp, dtype, bitorder="big"): + # Note numpy <= 1.9 raises an Exception when an input array is bool. 
+ # See https://github.com/numpy/numpy/issues/5377 + a = xp.array(data, dtype=dtype) + return xp.packbits(a, bitorder=bitorder) + + @testing.numpy_cupy_array_equal() + def check_unpackbits(self, data, xp, bitorder="big"): + a = xp.array(data, dtype=xp.uint8) + return xp.unpackbits(a, bitorder=bitorder) + + def test_packbits(self): + self.check_packbits([0]) + self.check_packbits([1]) + self.check_packbits([0, 1]) + self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1]) + self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1, 1]) + self.check_packbits(numpy.arange(24).reshape((2, 3, 4)) % 2) + + def test_packbits_order(self): + for bo in ["big", "little"]: + self.check_packbits([0], bitorder=bo) + self.check_packbits([1], bitorder=bo) + self.check_packbits([0, 1], bitorder=bo) + self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1], bitorder=bo) + self.check_packbits([1, 0, 1, 1, 0, 1, 1, 1, 1], bitorder=bo) + self.check_packbits( + numpy.arange(24).reshape((2, 3, 4)) % 2, bitorder=bo + ) + + def test_packbits_empty(self): + # Note packbits of numpy <= 1.11 has a bug against empty arrays. 
+ # See https://github.com/numpy/numpy/issues/8324 + self.check_packbits([]) + + def test_pack_invalid_order(self): + a = cupy.array([10, 20, 30]) + pytest.raises(ValueError, cupy.packbits, a, bitorder="ascendant") + pytest.raises(ValueError, cupy.packbits, a, bitorder=10.4) + + def test_pack_invalid_array(self): + fa = cupy.array([10, 20, 30], dtype=float) + pytest.raises(TypeError, cupy.packbits, fa) + + def test_unpackbits(self): + self.check_unpackbits([]) + self.check_unpackbits([0]) + self.check_unpackbits([1]) + self.check_unpackbits([255]) + self.check_unpackbits([100, 200, 123, 213]) + + def test_unpack_invalid_array(self): + a = cupy.array([10, 20, 30]) + pytest.raises(TypeError, cupy.unpackbits, a) + pytest.raises(TypeError, cupy.unpackbits, a.astype(float)) + + def test_pack_unpack_order(self): + for bo in ["big", "little"]: + self.check_unpackbits([], bitorder=bo) + self.check_unpackbits([0], bitorder=bo) + self.check_unpackbits([1], bitorder=bo) + self.check_unpackbits([255], bitorder=bo) + self.check_unpackbits([100, 200, 123, 213], bitorder=bo) + + def test_unpack_invalid_order(self): + a = cupy.array([10, 20, 30], dtype=cupy.uint8) + pytest.raises(ValueError, cupy.unpackbits, a, bitorder="r") + pytest.raises(ValueError, cupy.unpackbits, a, bitorder=10) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_array_function.py b/dpnp/tests/third_party/cupy/core_tests/test_array_function.py new file mode 100644 index 000000000000..7878a5a0aaff --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_array_function.py @@ -0,0 +1,64 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "__array_function__ protocol is not supported", allow_module_level=True +) + + +class TestArrayFunction(unittest.TestCase): + + @testing.with_requires("numpy>=1.17.0") + def test_array_function(self): + a = numpy.random.randn(100, 100) + a_cpu = numpy.asarray(a) + a_gpu = 
cupy.asarray(a) + + # The numpy call for both CPU and GPU arrays is intentional to test the + # __array_function__ protocol + qr_cpu = numpy.linalg.qr(a_cpu) + qr_gpu = numpy.linalg.qr(a_gpu) + + if isinstance(qr_cpu, tuple): + for b_cpu, b_gpu in zip(qr_cpu, qr_gpu): + assert b_cpu.dtype == b_gpu.dtype + testing.assert_allclose(b_cpu, b_gpu, atol=1e-4) + else: + assert qr_cpu.dtype == qr_gpu.dtype + testing.assert_allclose(qr_cpu, qr_gpu, atol=1e-4) + + @testing.with_requires("numpy>=1.17.0") + def test_array_function2(self): + a = numpy.random.randn(100, 100) + a_cpu = numpy.asarray(a) + a_gpu = cupy.asarray(a) + + # The numpy call for both CPU and GPU arrays is intentional to test the + # __array_function__ protocol + out_cpu = numpy.sum(a_cpu, axis=1) + out_gpu = numpy.sum(a_gpu, axis=1) + + assert out_cpu.dtype == out_gpu.dtype + testing.assert_allclose(out_cpu, out_gpu, atol=1e-4) + + @testing.with_requires("numpy>=1.17.0") + @testing.numpy_cupy_equal() + def test_array_function_can_cast(self, xp): + return numpy.can_cast(xp.arange(2), "f4") + + @testing.with_requires("numpy>=1.17.0") + @testing.numpy_cupy_equal() + def test_array_function_common_type(self, xp): + return numpy.common_type( + xp.arange(2, dtype="f8"), xp.arange(2, dtype="f4") + ) + + @testing.with_requires("numpy>=1.17.0") + @testing.numpy_cupy_equal() + def test_array_function_result_type(self, xp): + return numpy.result_type(3, xp.arange(2, dtype="f8")) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_carray.py b/dpnp/tests/third_party/cupy/core_tests/test_carray.py new file mode 100644 index 000000000000..b161ef49e6b2 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_carray.py @@ -0,0 +1,101 @@ +import unittest + +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("CArray is not supported", allow_module_level=True) + + +class TestCArray(unittest.TestCase): + + def test_size(self): + x = cupy.arange(3).astype("i") + y = 
cupy.ElementwiseKernel( + "raw int32 x", + "int32 y", + "y = x.size()", + "test_carray_size", + )(x, size=1) + assert int(y[0]) == 3 + + def test_shape(self): + x = cupy.arange(6).reshape((2, 3)).astype("i") + y = cupy.ElementwiseKernel( + "raw int32 x", + "int32 y", + "y = x.shape()[i]", + "test_carray_shape", + )(x, size=2) + testing.assert_array_equal(y, (2, 3)) + + def test_strides(self): + x = cupy.arange(6).reshape((2, 3)).astype("i") + y = cupy.ElementwiseKernel( + "raw int32 x", + "int32 y", + "y = x.strides()[i]", + "test_carray_strides", + )(x, size=2) + testing.assert_array_equal(y, (12, 4)) + + def test_getitem_int(self): + x = cupy.arange(24).reshape((2, 3, 4)).astype("i") + y = cupy.empty_like(x) + y = cupy.ElementwiseKernel( + "raw T x", + "int32 y", + "y = x[i]", + "test_carray_getitem_int", + )(x, y) + testing.assert_array_equal(y, x) + + def test_getitem_idx(self): + x = cupy.arange(24).reshape((2, 3, 4)).astype("i") + y = cupy.empty_like(x) + y = cupy.ElementwiseKernel( + "raw T x", + "int32 y", + "ptrdiff_t idx[] = {i / 12, i / 4 % 3, i % 4}; y = x[idx]", + "test_carray_getitem_idx", + )(x, y) + testing.assert_array_equal(y, x) + + +@testing.parameterize( + {"size": 2**31 - 1024}, + {"size": 2**31}, + {"size": 2**31 + 1024}, + {"size": 2**32 - 1024}, + {"size": 2**32}, + {"size": 2**32 + 1024}, +) +@testing.slow +class TestCArray32BitBoundary(unittest.TestCase): + # This test case is intended to confirm CArray indexing works correctly + # with input/output arrays whose size is so large that it crosses the + # 32-bit boundary (in terms of both number of elements and size in bytes). + # This test requires approx. 8 GiB GPU memory to run. + # See https://github.com/cupy/cupy/pull/882 for detailed discussions. 
+ + def tearDown(self): + # Free huge memory for slow test + cupy.get_default_memory_pool().free_all_blocks() + + # HIP is known to fail with sizes > 2**32-1024 + @unittest.skipIf(cupy.cuda.runtime.is_hip, "HIP does not support this") + def test(self): + # Elementwise + a = cupy.full((1, self.size), 7, dtype=cupy.int8) + # Reduction + result = a.sum(axis=0, dtype=cupy.int8) + # Explicitly specify the dtype to absorb Linux/Windows difference. + assert result.sum(dtype=cupy.int64) == self.size * 7 + + # HIP is known to fail with sizes > 2**32-1024 + @unittest.skipIf(cupy.cuda.runtime.is_hip, "HIP does not support this") + def test_assign(self): + a = cupy.zeros(self.size, dtype=cupy.int8) + a[-1] = 1.0 + assert a.sum() == 1 diff --git a/dpnp/tests/third_party/cupy/core_tests/test_core.py b/dpnp/tests/third_party/cupy/core_tests/test_core.py index 36b8fc340334..bf0cbecb24c8 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_core.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_core.py @@ -9,6 +9,7 @@ class TestSize(unittest.TestCase): + # def tearDown(self): # # Free huge memory for slow test # cupy.get_default_memory_pool().free_all_blocks() @@ -51,6 +52,7 @@ def test_size_huge(self, xp): @pytest.mark.skip("no cupy._core submodule") class TestOrder(unittest.TestCase): + @testing.for_orders(_orders.keys()) def test_ndarray(self, order): order_expect = _orders[order] @@ -101,6 +103,7 @@ def test_cupy_ndarray(self, dtype): ) @pytest.mark.skip("compiling cupy headers are not supported") class TestCuPyHeaders(unittest.TestCase): + def setUp(self): self.temporary_cache_dir_context = test_raw.use_temporary_cache_dir() self.cache_dir = self.temporary_cache_dir_context.__enter__() diff --git a/dpnp/tests/third_party/cupy/core_tests/test_cub_reduction.py b/dpnp/tests/third_party/cupy/core_tests/test_cub_reduction.py new file mode 100644 index 000000000000..2983c9a152f7 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_cub_reduction.py @@ -0,0 +1,175 @@ 
+import sys +import unittest +from itertools import combinations + +import pytest + +import dpnp as cupy + +# from cupy import _environment +from dpnp.tests.third_party.cupy import testing + +# from cupy._core import _accelerator +# from cupy._core import _cub_reduction +# from cupy.cuda import memory + +pytest.skip("CUB reduction is not supported", allow_module_level=True) + + +# This test class and its children below only test if CUB backend can be used +# or not; they don't verify its correctness as it's already extensively covered +# by existing tests +@unittest.skipIf(_environment.get_cub_path() is None, "CUB not found") +class CubReductionTestBase(unittest.TestCase): + """ + Note: call self.can_use() when arrays are already allocated, otherwise + call self._test_can_use(). + """ + + def setUp(self): + if cupy.cuda.runtime.is_hip: + if _environment.get_hipcc_path() is None: + self.skipTest("hipcc is not found") + + self.can_use = cupy._core._cub_reduction._can_use_cub_block_reduction + + self.old_accelerators = _accelerator.get_reduction_accelerators() + _accelerator.set_reduction_accelerators(["cub"]) + + def tearDown(self): + _accelerator.set_reduction_accelerators(self.old_accelerators) + + def _test_can_use(self, i_shape, o_shape, r_axis, o_axis, order, expected): + in_args = [ + cupy.testing.shaped_arange(i_shape, order=order), + ] + out_args = [ + cupy.testing.shaped_arange(o_shape, order=order), + ] + result = self.can_use(in_args, out_args, r_axis, o_axis) is not None + assert result is expected + + +@testing.parameterize( + *testing.product( + { + "shape": [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)], + "order": ("C", "F"), + } + ) +) +class TestSimpleCubReductionKernelContiguity(CubReductionTestBase): + + @testing.for_contiguous_axes() + def test_can_use_cub_contiguous(self, axis): + r_axis = axis + i_shape = self.shape + o_axis = tuple(i for i in range(len(i_shape)) if i not in r_axis) + o_shape = tuple(self.shape[i] for i in o_axis) + 
self._test_can_use(i_shape, o_shape, r_axis, o_axis, self.order, True) + + @testing.for_contiguous_axes() + def test_can_use_cub_non_contiguous(self, axis): + # array is contiguous, but reduce_axis is not + dim = len(self.shape) + r_dim = len(axis) + non_contiguous_axes = [ + i for i in combinations(range(dim), r_dim) if i != axis + ] + + i_shape = self.shape + for r_axis in non_contiguous_axes: + o_axis = tuple(i for i in range(dim) if i not in r_axis) + o_shape = tuple(self.shape[i] for i in o_axis) + self._test_can_use( + i_shape, o_shape, r_axis, o_axis, self.order, False + ) + + +class TestSimpleCubReductionKernelMisc(CubReductionTestBase): + + def test_can_use_cub_nonsense_input1(self): + # two inputs are not allowed + a = cupy.random.random((2, 3, 4)) + b = cupy.random.random((2, 3, 4)) + c = cupy.empty( + ( + 2, + 3, + ) + ) + assert self.can_use([a, b], [c], (2,), (0, 1)) is None + + def test_can_use_cub_nonsense_input2(self): + # reduce_axis and out_axis do not add up to full axis set + self._test_can_use((2, 3, 4), (2, 3), (2,), (0,), "C", False) + + def test_can_use_cub_nonsense_input3(self): + # array is neither C- nor F- contiguous + a = cupy.random.random((3, 4, 5)) + a = a[:, 0:-1:2, 0:-1:3] + assert not a.flags.forc + b = cupy.empty((3,)) + assert self.can_use([a], [b], (1, 2), (0,)) is None + + def test_can_use_cub_zero_size_input(self): + self._test_can_use((2, 0, 3), (), (0, 1, 2), (), "C", False) + + # We actually just want to test shapes, no need to allocate large memory. + def test_can_use_cub_oversize_input1(self): + # full reduction with array size > 64 GB + mem = memory.alloc(100) + a = cupy.ndarray((2**6 * 1024**3 + 1,), dtype=cupy.int8, memptr=mem) + b = cupy.empty((), dtype=cupy.int8) + assert self.can_use([a], [b], (0,), ()) is None + + def test_can_use_cub_oversize_input2(self): + # full reduction with array size = 64 GB should work! 
+ mem = memory.alloc(100) + a = cupy.ndarray((2**6 * 1024**3,), dtype=cupy.int8, memptr=mem) + b = cupy.empty((), dtype=cupy.int8) + assert self.can_use([a], [b], (0,), ()) is not None + + def test_can_use_cub_oversize_input3(self): + # full reduction with 2^63-1 elements + mem = memory.alloc(100) + max_num = sys.maxsize + a = cupy.ndarray((max_num,), dtype=cupy.int8, memptr=mem) + b = cupy.empty((), dtype=cupy.int8) + assert self.can_use([a], [b], (0,), ()) is None + + def test_can_use_cub_oversize_input4(self): + # partial reduction with too many (2^31) blocks + mem = memory.alloc(100) + a = cupy.ndarray((2**31, 8), dtype=cupy.int8, memptr=mem) + b = cupy.empty((), dtype=cupy.int8) + assert self.can_use([a], [b], (1,), (0,)) is None + + def test_can_use_accelerator_set_unset(self): + # ensure we use CUB block reduction and not CUB device reduction + old_routine_accelerators = _accelerator.get_routine_accelerators() + _accelerator.set_routine_accelerators([]) + + a = cupy.random.random((10, 10)) + # this is the only function we can mock; the rest is cdef'd + func_name = "".join( + ( + "cupy._core._cub_reduction.", + "_SimpleCubReductionKernel_get_cached_function", + ) + ) + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=2 + ): # two passes + a.sum() + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=1 + ): # one pass + a.sum(axis=1) + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=0 + ): # not used + a.sum(axis=0) + + _accelerator.set_routine_accelerators(old_routine_accelerators) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_dlpack.py b/dpnp/tests/third_party/cupy/core_tests/test_dlpack.py index bce0d3a4d18c..82bd3336d89d 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_dlpack.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_dlpack.py @@ -1,5 +1,3 @@ -import unittest - import dpctl import 
dpctl.tensor._dlpack as dlp import numpy @@ -31,18 +29,54 @@ def _gen_array(dtype, alloc_q=None): return array -class TestDLPackConversion(unittest.TestCase): +class DLDummy: + """Dummy object to wrap a __dlpack__ capsule, so we can use from_dlpack.""" + + def __init__(self, capsule, device): + self.capsule = capsule + self.device = device + + def __dlpack__(self, *args, **kwargs): + return self.capsule + + def __dlpack_device__(self): + return self.device + + +@pytest.mark.skip("toDlpack() and fromDlpack() are not supported") +class TestDLPackConversion: + + @pytest.mark.filterwarnings("ignore::DeprecationWarning") @testing.for_all_dtypes(no_bool=False) def test_conversion(self, dtype): orig_array = _gen_array(dtype) - tensor = orig_array.__dlpack__() - out_array = dlp.from_dlpack_capsule(tensor) + tensor = orig_array.toDlpack() + out_array = cupy.fromDlpack(tensor) testing.assert_array_equal(orig_array, out_array) - assert orig_array.get_array()._pointer == out_array._pointer + assert orig_array.get_array()._pointer == out_array.get_array()._pointer + + +class TestNewDLPackConversion: + @pytest.fixture( + autouse=True, params=["device"] + ) # "managed" is not supported + def pool(self, request): + self.memory = request.param + if self.memory == "managed": + old_pool = cupy.get_default_memory_pool() + new_pool = cuda.MemoryPool(cuda.malloc_managed) + cuda.set_allocator(new_pool.malloc) + + yield + + cuda.set_allocator(old_pool.malloc) + else: + # Nothing to do, we can use the default pool. 
+ yield + + del self.memory -@testing.parameterize(*testing.product({"memory": ("device", "managed")})) -class TestNewDLPackConversion(unittest.TestCase): def _get_stream(self, stream_name): if stream_name == "null": return dpctl.SyclQueue() @@ -55,6 +89,116 @@ def test_conversion(self, dtype): testing.assert_array_equal(orig_array, out_array) assert orig_array.get_array()._pointer == out_array.get_array()._pointer + @pytest.mark.skip("no limitations in from_dlpack()") + def test_from_dlpack_and_conv_errors(self): + orig_array = _gen_array("int8") + + with pytest.raises(NotImplementedError): + cupy.from_dlpack(orig_array, device=orig_array.device) + + with pytest.raises(BufferError): + # Currently CuPy's `__dlpack__` only allows `copy=True` + # for host copies. + cupy.from_dlpack(orig_array, copy=True) + + @pytest.mark.parametrize( + "kwargs, versioned", + [ + ({}, False), + ({"max_version": None}, False), + ({"max_version": (1, 0)}, True), + ({"max_version": (10, 10)}, True), + ({"max_version": (0, 8)}, False), + ], + ) + def test_conversion_max_version(self, kwargs, versioned): + orig_array = _gen_array("int8") + + capsule = orig_array.__dlpack__(**kwargs) + # We can identify if the version is correct via the name: + if versioned: + assert '"dltensor_versioned"' in str(capsule) + else: + assert '"dltensor"' in str(capsule) + + out_array = cupy.from_dlpack( + DLDummy(capsule, orig_array.__dlpack_device__()) + ) + + testing.assert_array_equal(orig_array, out_array) + assert orig_array.get_array()._pointer == out_array.get_array()._pointer + + def test_conversion_device(self): + orig_array = _gen_array("float32") + + # If the device is identical, then we support it: + capsule = orig_array.__dlpack__( + dl_device=orig_array.__dlpack_device__() + ) + out_array = cupy.from_dlpack( + DLDummy(capsule, orig_array.__dlpack_device__()) + ) + + testing.assert_array_equal(orig_array, out_array) + assert orig_array.get_array()._pointer == out_array.get_array()._pointer + + def 
test_conversion_bad_device(self): + arr = _gen_array("float32") + + # invalid device ID + with pytest.raises(BufferError): + arr.__dlpack__( + dl_device=(arr.__dlpack_device__()[0], 2**30), + max_version=(1, 0), + ) + + # Simple, non-matching device: + with pytest.raises(BufferError): + arr.__dlpack__(dl_device=(9, 0), max_version=(1, 0)) + + @pytest.mark.skip("numpy doesn't support kDLOneAPI device type") + def test_conversion_device_to_cpu(self): + # NOTE: This defaults to the old unversioned, which is needed for + # NumPy 1.x support. + # If (and only if) the device is managed, we also support exporting + # to CPU. + orig_array = _gen_array("float32") + + arr1 = numpy.from_dlpack( + DLDummy(orig_array.__dlpack__(dl_device=(1, 0)), device=(1, 0)) + ) + arr2 = numpy.from_dlpack( + DLDummy(orig_array.__dlpack__(dl_device=(1, 0)), device=(1, 0)) + ) + + numpy.testing.assert_array_equal(orig_array.get(), arr1) + assert orig_array.dtype == arr1.dtype + # Arrays share the same memory exactly when memory is managed. 
+ assert numpy.may_share_memory(arr1, arr2) == (self.memory == "managed") + + arr_copy = numpy.from_dlpack( + DLDummy( + orig_array.__dlpack__(dl_device=(1, 0), copy=True), + device=(1, 0), + ) + ) + # The memory must not be shared with a copy=True request + assert not numpy.may_share_memory(arr_copy, arr1) + numpy.testing.assert_array_equal(arr1, arr_copy) + + # Also test copy=False + if self.memory != "managed": + with pytest.raises(ValueError): + orig_array.__dlpack__(dl_device=(1, 0), copy=False) + else: + arr_nocopy = numpy.from_dlpack( + DLDummy( + orig_array.__dlpack__(dl_device=(1, 0), copy=False), + device=(1, 0), + ) + ) + assert numpy.may_share_memory(arr_nocopy, arr1) + def test_stream(self): allowed_streams = ["null", True] @@ -73,48 +217,61 @@ def test_stream(self): ) -class TestDLTensorMemory(unittest.TestCase): - # def setUp(self): - # self.old_pool = cupy.get_default_memory_pool() - # self.pool = cupy.cuda.MemoryPool() - # cupy.cuda.set_allocator(self.pool.malloc) +class TestDLTensorMemory: + + @pytest.fixture + def pool(self): + pass + + # old_pool = cupy.get_default_memory_pool() + # pool = cupy.cuda.MemoryPool() + # cupy.cuda.set_allocator(pool.malloc) + + # yield pool - # def tearDown(self): - # self.pool.free_all_blocks() - # cupy.cuda.set_allocator(self.old_pool.malloc) + # pool.free_all_blocks() + # cupy.cuda.set_allocator(old_pool.malloc) - def test_deleter(self): + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_deleter(self, pool, max_version): # memory is freed when tensor is deleted, as it's not consumed array = cupy.empty(10) - tensor = array.__dlpack__() + tensor = array.__dlpack__(max_version=max_version) # str(tensor): - assert '"dltensor"' in str(tensor) - # assert self.pool.n_free_blocks() == 0 + name = "dltensor" if max_version is None else "dltensor_versioned" + assert f'"{name}"' in str(tensor) + # assert pool.n_free_blocks() == 0 # del array - # assert self.pool.n_free_blocks() == 0 + # assert 
pool.n_free_blocks() == 0 # del tensor - # assert self.pool.n_free_blocks() == 1 + # assert pool.n_free_blocks() == 1 - def test_deleter2(self): + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_deleter2(self, pool, max_version): # memory is freed when array2 is deleted, as tensor is consumed array = cupy.empty(10) - tensor = array.__dlpack__() - assert '"dltensor"' in str(tensor) - array2 = dlp.from_dlpack_capsule(tensor) - assert '"used_dltensor"' in str(tensor) - # assert self.pool.n_free_blocks() == 0 + tensor = array.__dlpack__(max_version=max_version) + name = "dltensor" if max_version is None else "dltensor_versioned" + assert f'"{name}"' in str(tensor) + array2 = cupy.from_dlpack( + DLDummy(tensor, device=array.__dlpack_device__()) + ) + assert f'"used_{name}"' in str(tensor) + # assert pool.n_free_blocks() == 0 # del array - # assert self.pool.n_free_blocks() == 0 + # assert pool.n_free_blocks() == 0 # del array2 - # assert self.pool.n_free_blocks() == 1 + # assert pool.n_free_blocks() == 1 # del tensor - # assert self.pool.n_free_blocks() == 1 + # assert pool.n_free_blocks() == 1 + @pytest.mark.skip("toDlpack() and fromDlpack() are not supported") + @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_multiple_consumption_error(self): # Prevent segfault, see #3611 array = cupy.empty(10) - tensor = array.__dlpack__() - array2 = dlp.from_dlpack_capsule(tensor) + tensor = array.toDlpack() + array2 = cupy.fromDlpack(tensor) with pytest.raises(ValueError) as e: - array3 = dlp.from_dlpack_capsule(tensor) + array3 = cupy.fromDlpack(tensor) assert "consumed multiple times" in str(e.value) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_core_elementwise.py b/dpnp/tests/third_party/cupy/core_tests/test_elementwise.py similarity index 62% rename from dpnp/tests/third_party/cupy/core_tests/test_core_elementwise.py rename to dpnp/tests/third_party/cupy/core_tests/test_elementwise.py index 8adb2bead2c2..2d268e53b37b 100644 --- 
a/dpnp/tests/third_party/cupy/core_tests/test_core_elementwise.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_elementwise.py @@ -4,10 +4,12 @@ import pytest import dpnp as cupy +from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing class TestElementwise(unittest.TestCase): + def check_copy(self, dtype, src_id, dst_id): with cuda.Device(src_id): src = testing.shaped_arange((2, 3, 4), dtype=dtype) @@ -16,13 +18,13 @@ def check_copy(self, dtype, src_id, dst_id): _core.elementwise_copy(src, dst) testing.assert_allclose(src, dst) - @pytest.mark.skip("`device` argument isn't supported") + @pytest.mark.skip("elementwise_copy() argument isn't supported") @testing.for_all_dtypes() def test_copy(self, dtype): device_id = cuda.Device().id self.check_copy(dtype, device_id, device_id) - @pytest.mark.skip("`device` argument isn't supported") + @pytest.mark.skip("elementwise_copy() argument isn't supported") @testing.for_all_dtypes() def test_copy_multigpu_nopeer(self, dtype): if cuda.runtime.deviceCanAccessPeer(0, 1) == 1: @@ -30,7 +32,7 @@ def test_copy_multigpu_nopeer(self, dtype): with self.assertRaises(ValueError): self.check_copy(dtype, 0, 1) - @pytest.mark.skip("`device` argument isn't supported") + @pytest.mark.skip("elementwise_copy() argument isn't supported") @testing.for_all_dtypes() def test_copy_multigpu_peer(self, dtype): if cuda.runtime.deviceCanAccessPeer(0, 1) != 1: @@ -67,8 +69,9 @@ def test_copy_orders(self, order): assert b.strides == tuple(x / b_cpu.itemsize for x in b_cpu.strides) -@pytest.mark.skip("`ElementwiseKernel` function isn't supported") +@pytest.mark.skip("`ElementwiseKernel` isn't supported") class TestElementwiseInvalidShape(unittest.TestCase): + def test_invalid_shape(self): with self.assertRaisesRegex(ValueError, "Out shape is mismatched"): f = cupy.ElementwiseKernel("T x", "T y", "y += x") @@ -77,67 +80,114 @@ def test_invalid_shape(self): f(x, y) -@pytest.mark.skip("`ElementwiseKernel` 
function isn't supported") +@pytest.mark.skip("`ElementwiseKernel` isn't supported") class TestElementwiseInvalidArgument(unittest.TestCase): + def test_invalid_kernel_name(self): with self.assertRaisesRegex(ValueError, "Invalid kernel name"): cupy.ElementwiseKernel("T x", "", "", "1") -@pytest.mark.skip("`iinfo` function isn't supported") class TestElementwiseType(unittest.TestCase): + + @testing.with_requires("numpy>=2.0") @testing.for_int_dtypes(no_bool=True) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(accept_error=OverflowError) def test_large_int_upper_1(self, xp, dtype): - a = xp.array([0], dtype=xp.int8) + a = xp.array([0], dtype=numpy.int8) b = xp.iinfo(dtype).max return a + b @testing.for_int_dtypes(no_bool=True) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(accept_error=OverflowError) def test_large_int_upper_2(self, xp, dtype): - a = xp.array([1], dtype=xp.int8) + if ( + numpy.issubdtype(dtype, numpy.unsignedinteger) + and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0" + ): + pytest.skip("numpy promotes dtype differently") + + a = xp.array([1], dtype=numpy.int8) b = xp.iinfo(dtype).max - 1 return a + b @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_large_int_upper_3(self, xp, dtype): + if ( + numpy.issubdtype(dtype, numpy.unsignedinteger) + and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0" + ): + pytest.skip("numpy promotes dtype differently") + elif ( + dtype in (numpy.uint64, numpy.ulonglong) + and not has_support_aspect64() + ): + pytest.skip("no fp64 support") + a = xp.array([xp.iinfo(dtype).max], dtype=dtype) - b = xp.int8(0) + b = numpy.int8(0) return a + b @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_large_int_upper_4(self, xp, dtype): + if ( + numpy.issubdtype(dtype, numpy.unsignedinteger) + and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0" + ): + pytest.skip("numpy promotes dtype differently") + elif ( + dtype 
in (numpy.uint64, numpy.ulonglong) + and not has_support_aspect64() + ): + pytest.skip("no fp64 support") + a = xp.array([xp.iinfo(dtype).max - 1], dtype=dtype) - b = xp.int8(1) + b = numpy.int8(1) return a + b @testing.for_int_dtypes(no_bool=True) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(accept_error=OverflowError) def test_large_int_lower_1(self, xp, dtype): - a = xp.array([0], dtype=xp.int8) + a = xp.array([0], dtype=numpy.int8) b = xp.iinfo(dtype).min return a + b @testing.for_int_dtypes(no_bool=True) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_array_equal(accept_error=OverflowError) def test_large_int_lower_2(self, xp, dtype): - a = xp.array([-1], dtype=xp.int8) + a = xp.array([-1], dtype=numpy.int8) b = xp.iinfo(dtype).min + 1 return a + b @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_large_int_lower_3(self, xp, dtype): + if ( + numpy.issubdtype(dtype, numpy.unsignedinteger) + and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0" + ): + pytest.skip("numpy promotes dtype differently") + elif ( + dtype in (numpy.uint64, numpy.ulonglong) + and not has_support_aspect64() + ): + pytest.skip("no fp64 support") + a = xp.array([xp.iinfo(dtype).min], dtype=dtype) - b = xp.int8(0) + b = numpy.int8(0) return a + b @testing.for_int_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_large_int_lower_4(self, xp, dtype): + if ( + dtype in (numpy.uint64, numpy.ulonglong) + and not has_support_aspect64() + ): + pytest.skip("no fp64 support") + a = xp.array([xp.iinfo(dtype).min + 1], dtype=dtype) - b = xp.int8(-1) + b = numpy.int8(-1) return a + b diff --git a/dpnp/tests/third_party/cupy/core_tests/test_flags.py b/dpnp/tests/third_party/cupy/core_tests/test_flags.py index 9dbccfbf0c4e..b2cc4fca59fb 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_flags.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_flags.py @@ -9,6 +9,7 @@ @pytest.mark.skip("class Flags is not exposed") 
class TestFlags(unittest.TestCase): + def setUp(self): self.flags = flags.Flags(1, 2, 3) @@ -42,6 +43,7 @@ def test_repr(self): ) ) class TestContiguityFlags(unittest.TestCase): + def setUp(self): self.flags = None diff --git a/dpnp/tests/third_party/cupy/core_tests/test_function.py b/dpnp/tests/third_party/cupy/core_tests/test_function.py new file mode 100644 index 000000000000..315dcbd09674 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_function.py @@ -0,0 +1,229 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy + +# from cupy._core import core +# from cupy.cuda import compiler +# from cupy.cuda import runtime +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "_compile_module_with_cache is not supported", allow_module_level=True +) + + +def _compile_func(kernel_name, code): + # workaround for hipRTC + extra_source = core._get_header_source() if runtime.is_hip else None + mod = compiler._compile_module_with_cache( + code, options=("--std=c++11",), extra_source=extra_source + ) + return mod.get_function(kernel_name) + + +class TestFunction(unittest.TestCase): + + def test_python_scalar(self): + code = """ +extern "C" __global__ void test_kernel(const double* a, double b, double* x) { + int i = threadIdx.x; + x[i] = a[i] + b; +} +""" + + a_cpu = numpy.arange(24, dtype=numpy.float64).reshape((4, 6)) + a = cupy.array(a_cpu) + b = float(2) + x = cupy.empty_like(a) + + func = _compile_func("test_kernel", code) + + func.linear_launch(a.size, (a, b, x)) + + expected = a_cpu + b + testing.assert_array_equal(x, expected) + + def test_numpy_scalar(self): + code = """ +extern "C" __global__ void test_kernel(const double* a, double b, double* x) { + int i = threadIdx.x; + x[i] = a[i] + b; +} +""" + + a_cpu = numpy.arange(24, dtype=numpy.float64).reshape((4, 6)) + a = cupy.array(a_cpu) + b = numpy.float64(2) + x = cupy.empty_like(a) + + func = _compile_func("test_kernel", code) + + func.linear_launch(a.size, (a, b, x)) + 
+ expected = a_cpu + b + testing.assert_array_equal(x, expected) + + def test_numpy_dtype(self): + code = """ +extern "C" __global__ void test_kernel(const double* a, double3 b, double* x) { + int i = threadIdx.x; + x[i] = a[i] + b.x + b.y + b.z; +} +""" + + double3 = numpy.dtype( + {"names": ["x", "y", "z"], "formats": [numpy.float64] * 3} + ) + a_cpu = numpy.arange(24, dtype=numpy.float64) + a = cupy.array(a_cpu) + b = numpy.random.rand(3).view(double3) + x = cupy.empty_like(a) + + func = _compile_func("test_kernel", code) + + func.linear_launch(a.size, (a, b, x)) + + expected = a_cpu + b["x"] + b["y"] + b["z"] + testing.assert_array_equal(x, expected) + + def test_static_array(self): + code = """ +struct double5 { + double value[5]; + __device__ const double& operator[](size_t i) const { return value[i]; } +}; + +extern "C" __global__ void test_kernel(const double* a, double5 b, double* x) { + int i = threadIdx.x; + x[i] = a[i] + b[0] + b[1] + b[2] + b[3] + b[4]; +} +""" + + a_cpu = numpy.arange(24, dtype=numpy.float64) + a = cupy.array(a_cpu) + x = cupy.empty_like(a) + + func = _compile_func("test_kernel", code) + + # We cannot pass np.ndarray kernel arguments of size > 1 + b = numpy.arange(5).astype(numpy.float64) + with pytest.raises(TypeError): + func.linear_launch(a.size, (a, b, x)) + + double5 = numpy.dtype( + {"names": ["dummy"], "formats": [(numpy.float64, (5,))]} + ) + func.linear_launch(a.size, (a, b.view(double5), x)) + + expected = a_cpu + b.sum() + testing.assert_array_equal(x, expected) + + def test_custom_user_struct(self): + struct_definition = """ +struct custom_user_struct { + int4 a; + char b; + double c[2]; + short1 d; + unsigned long long int e[3]; +}; +""" + + # first step is to determine struct memory layout + struct_layout_code = """ +{struct_definition} +extern "C" __global__ void get_struct_layout( + unsigned long long *itemsize, + unsigned long long *sizes, + unsigned long long *offsets) {{ + const custom_user_struct* ptr = nullptr; + 
+ itemsize[0] = sizeof(custom_user_struct); + + sizes[0] = sizeof(ptr->a); + sizes[1] = sizeof(ptr->b); + sizes[2] = sizeof(ptr->c); + sizes[3] = sizeof(ptr->d); + sizes[4] = sizeof(ptr->e); + + offsets[0] = (unsigned long long)&ptr->a; + offsets[1] = (unsigned long long)&ptr->b; + offsets[2] = (unsigned long long)&ptr->c; + offsets[3] = (unsigned long long)&ptr->d; + offsets[4] = (unsigned long long)&ptr->e; +}} +""".format( + struct_definition=struct_definition + ) + + itemsize = cupy.ndarray(shape=(1,), dtype=numpy.uint64) + sizes = cupy.ndarray(shape=(5,), dtype=numpy.uint64) + offsets = cupy.ndarray(shape=(5,), dtype=numpy.uint64) + func = _compile_func("get_struct_layout", struct_layout_code) + func.linear_launch(1, (itemsize, sizes, offsets)) + + # Build structure data type recursively + names = list("abcde") + itemsize = cupy.asnumpy(itemsize).item() + sizes = cupy.asnumpy(sizes).tolist() + offsets = cupy.asnumpy(offsets).tolist() + + def make_packed(basetype, N, itemsize): + assert 0 < N <= 4, N + names = list("xyzw")[:N] + formats = [basetype] * N + return numpy.dtype( + dict(names=names, formats=formats, itemsize=itemsize) + ) + + # structure member data types + int4 = make_packed(numpy.int32, 4, sizes[0]) + char = make_packed(numpy.int8, 1, sizes[1]) + double2 = make_packed(numpy.float64, 2, sizes[2]) + short1 = make_packed(numpy.int16, 1, sizes[3]) + ulong3 = make_packed(numpy.uint64, 3, sizes[4]) + + formats = [int4, char, double2, short1, ulong3] + struct_dtype = numpy.dtype( + dict( + names=names, formats=formats, offsets=offsets, itemsize=itemsize + ) + ) + + s = numpy.empty(shape=(1,), dtype=struct_dtype) + s["a"] = numpy.arange(0, 4).astype(numpy.int32).view(int4) + s["b"] = numpy.arange(4, 5).astype(numpy.int8).view(char) + s["c"] = numpy.arange(5, 7).astype(numpy.float64).view(double2) + s["d"] = numpy.arange(7, 8).astype(numpy.int16).view(short1) + s["e"] = numpy.arange(8, 11).astype(numpy.uint64).view(ulong3) + + # test kernel code + code = 
""" +{struct_definition} +extern "C" __global__ void test_kernel(const double* a, + custom_user_struct s, + double* x) {{ + int i = threadIdx.x; + double sum = s.a.x + s.a.y + s.a.z + s.a.w; + sum += s.b; + sum += s.c[0] + s.c[1]; + sum += s.d.x; + sum += s.e[0] + s.e[1] + s.e[2]; + x[i] = a[i] + sum; +}} +""".format( + struct_definition=struct_definition + ) + + a_cpu = numpy.arange(24, dtype=numpy.float64) + a = cupy.array(a_cpu) + x = cupy.empty_like(a) + + func = _compile_func("test_kernel", code) + func.linear_launch(a.size, (a, s, x)) + + expected = a_cpu + 55.0 + testing.assert_array_equal(x, expected) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_gufuncs.py b/dpnp/tests/third_party/cupy/core_tests/test_gufuncs.py new file mode 100644 index 000000000000..954ec4b4ea3b --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_gufuncs.py @@ -0,0 +1,308 @@ +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# from cupy._core._gufuncs import _GUFunc + +pytest.skip("GUFunc is not supported", allow_module_level=True) + + +class TestGUFuncSignature: + @pytest.mark.parametrize( + "signature", + [ + ("(i,j)->(i,j)", [("i", "j")], [("i", "j")]), + ("->(i)", [()], [("i",)]), + ("(i,j),(j,k)->(k,l)", [("i", "j"), ("j", "k")], [("k", "l")]), + ("()->()", [()], [()]), + ], + ) + def test_signature_parsing(self, signature): + i, o = cupy._core._gufuncs._parse_gufunc_signature(signature[0]) + assert i == signature[1] + assert o == signature[2] + + @pytest.mark.parametrize( + "signature", + [ + "(i,j)(i,j)", + "(i,j)-(i,j)", + "(i,j)(i,j)->(i,j)", + "j->(i", + "", + "()->()->", + ], + ) + def test_invalid_signature_parsing(self, signature): + with pytest.raises(ValueError): + cupy._core._gufuncs._parse_gufunc_signature(signature) + + +class TestGUFuncAxes: + def _get_gufunc(self, signature): + def func(x): + return x + + return _GUFunc(func, signature) + + def _get_gufunc_scalar(self, signature): + def 
func(x): + return x.sum() + + return _GUFunc(func, signature) + + @pytest.mark.parametrize( + "axes", + [ + ((-1, -2), (-1, -2)), + ((0, 1), (0, 1)), + ((0, 1), (-1, -2)), + ((1, 2), (-1, -2)), + ((1, 2), (1, 2)), + ((1, 2), (2, 3)), + ((2, 3), (-1, -2)), + ((2, 3), (0, 1)), + ((2, 3), (1, 2)), + ((0, 3), (1, 2)), + ((0, 3), (2, 0)), + ], + ) + @testing.numpy_cupy_array_equal() + def test_axes_selection(self, xp, axes): + x = testing.shaped_arange((2, 3, 4, 5), xp=xp) + if xp is cupy: + return self._get_gufunc("(i,j)->(i,j)")(x, axes=list(axes)) + else: + return numpy.moveaxis(x, axes[0], axes[1]) + + @pytest.mark.parametrize( + "axes", + [ + (-1, -2), + (0, 1), + (1, 2), + (2, 3), + (0, 2), + (0, 3), + (1, 3), + (3, 0), + (2, 0), + (2, 1), + (1, 0), + ], + ) + @testing.numpy_cupy_array_equal() + def test_axes_selection_single(self, xp, axes): + x = testing.shaped_arange((2, 3, 4, 5), xp=xp) + if xp is cupy: + return self._get_gufunc("(i)->(i)")(x, axes=list(axes)) + else: + return numpy.moveaxis(x, axes[0], axes[1]) + + @pytest.mark.parametrize("axis", [0, 1, 2, 3]) + @testing.numpy_cupy_array_equal() + def test_axis(self, xp, axis): + x = testing.shaped_arange((2, 3, 4, 5), xp=xp) + if xp is cupy: + return self._get_gufunc_scalar("(i)->()")(x, axis=axis) + else: + return x.sum(axis=axis) + + def test_axis_invalid(self): + x = testing.shaped_arange((2, 3, 4, 5)) + with pytest.raises(ValueError): + self._get_gufunc("(i, j)->(i, j)")(x, axis=((0, 1), (0, 1))) + + @pytest.mark.parametrize("supports_batched", [True, False]) + def test_supports_batched(self, supports_batched): + x = testing.shaped_arange((2, 3, 4, 5)) + + def func(x): + nonlocal supports_batched + if supports_batched: + assert x.ndim == 4 + else: + assert x.ndim == 2 + return x + + gu_func = _GUFunc( + func, "(i,j)->(i,j)", supports_batched=supports_batched + ) + gu_func(x) + + +class TestGUFuncOut: + def _get_gufunc(self): + def func(x): + return x + + return _GUFunc(func, "(i,j)->(i,j)") + + def 
test_out_array(self): + x = testing.shaped_arange((2, 3, 4, 5)) + out = cupy.empty((2, 3, 4, 5)) + self._get_gufunc()(x, out=out) + testing.assert_allclose(x, out) + + def test_supports_out(self): + x = testing.shaped_arange((2, 3, 4, 5)) + out = cupy.empty((2, 3, 4, 5)) + out_ptr = out.data.ptr + + def func(x, out=None): + nonlocal out_ptr + # Base is a view of the output due to the batching + assert out.base.data.ptr == out_ptr + out[:] = x + + gu_func = _GUFunc(func, "(i,j)->(i,j)", supports_out=True) + gu_func(x, out=out) + testing.assert_allclose(x, out) + + def test_invalid_output_shape(self): + x = testing.shaped_arange((2, 3, 4, 5)) + out = cupy.empty((3, 3, 4, 5)) + with pytest.raises(ValueError): + self._get_gufunc()(x, out=out) + + def test_invalid_output_dtype(self): + x = testing.shaped_arange((2, 3, 4, 5)) + out = cupy.empty((2, 3, 4, 5), dtype="h") + with pytest.raises(TypeError): + self._get_gufunc()(x, out=out) + + +class TestGUFuncDtype: + @testing.for_all_dtypes(name="dtype_i", no_bool=True, no_complex=True) + @testing.for_all_dtypes(name="dtype_o", no_bool=True, no_complex=True) + def test_dtypes(self, dtype_i, dtype_o): + x = testing.shaped_arange((2, 3, 4, 5), dtype=dtype_i) + if numpy.can_cast(dtype_o, x.dtype): + + def func(x): + return x + + gufunc = _GUFunc(func, "(i,j)->(i,j)") + z = gufunc(x, dtype=dtype_o) + assert z.dtype == dtype_o + testing.assert_allclose(z, x) + + +class TestGUFuncOrder: + + @pytest.mark.parametrize("order", ["C", "F", "K"]) + @testing.numpy_cupy_array_equal(strides_check=True) + def test_order(self, xp, order): + x = testing.shaped_arange((2, 3, 4), xp=xp) + if xp is cupy: + + def default(x): + return x + + gu_func = _GUFunc(default, "(i, j, k)->(i, j, k)") + return gu_func(x, order=order) + else: + return xp.asarray(x, order=order) + + @pytest.mark.parametrize("order", [("F", "C", "C"), ("F", "F", "F")]) + def test_order_a(self, order): + x = testing.shaped_arange((2, 3, 4), order=order[0]) + y = 
testing.shaped_arange((2, 3, 4), order=order[1]) + + def default(x, y): + return x + + gu_func = _GUFunc(default, "(i,j,k),(i,j,k)->(i,j,k)") + z = gu_func(x, y, order="A") + if order[2] == "C": + assert z.flags.c_contiguous + else: + assert z.flags.f_contiguous + + +class TestGUFuncSignatures: + def test_signatures(self): + dtypes = "fdihq" + dtypes_access = {d: None for d in dtypes} + + def integers(x, y): + nonlocal dtypes_access + dtypes_access[numpy.dtype(x.dtype).char] = integers + return x + y + + def floats(x, y): + nonlocal dtypes_access + dtypes_access[numpy.dtype(x.dtype).char] = floats + return x + y + + def default(x, y): + nonlocal dtypes_access + dtypes_access[numpy.dtype(x.dtype).char] = default + return x + y + + sigs = (("ii->i", integers), ("dd->d", floats)) + gu_func = _GUFunc(default, "(i),(i)->(i)", signatures=sigs) + for dtype in dtypes: + x = cupy.array([10], dtype=dtype) + y = x + gu_func(x, y, casting="no") + if dtype in "i": + assert dtypes_access[dtype] == integers + elif dtype in "d": + assert dtypes_access[dtype] == floats + else: + assert dtypes_access[dtype] == default + + @pytest.mark.parametrize("sig,", ["ii->i", "i", ("i", "i", "i")]) + def test_signature_lookup(self, sig): + called = False + + def func(x, y): + nonlocal called + called = True + return x + y + + def default(x, y): + return x + y + + dtypes = "fdhq" + + sigs = (("ii->i", func),) + gu_func = _GUFunc(default, "(i),(i)->(i)", signatures=sigs) + for dtype in dtypes: + x = cupy.array([10], dtype=dtype) + y = x + gu_func(x, y, casting="no") + assert not called + + x = cupy.array([10], dtype="d") + y = x + z = gu_func(x, y, casting="unsafe", signature=sig) + assert z.dtype == numpy.int32 + assert called + + @pytest.mark.parametrize("sigs,", [("i",), ("",), ("iii->i",), ("ii->",)]) + def test_invalid_signatures(self, sigs): + + def default(x, y): + return x + y + + with pytest.raises(ValueError): + _GUFunc(default, "(i),(i)->(i)", signatures=sigs) + + 
@pytest.mark.parametrize("sig,", ["i->i", "id->i", ""]) + def test_invalid_lookup(self, sig): + + def default(x, y): + return x + y + + sigs = (("ii->i", default),) + gu_func = _GUFunc(default, "(i),(i)->(i)", signatures=sigs) + _GUFunc(default, "(i),(i)->(i)", signatures=sigs) + + x = cupy.array([10], dtype="d") + y = x + with pytest.raises(TypeError): + gu_func(x, y, casting="unsafe", signature=sig) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_include.py b/dpnp/tests/third_party/cupy/core_tests/test_include.py new file mode 100644 index 000000000000..1119b13914e2 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_include.py @@ -0,0 +1,98 @@ +import os +from unittest import mock + +import pytest + +import dpnp as cupy + +pytest.skip("CUDA compile is not supported", allow_module_level=True) + +_code_base = """ +#include + +#include +#include +#include +#include +#include +#include +""" + +_code_nvcc = ( + _code_base + + """ +#include + +int main() { + return 0; +} +""" +) + +_code_nvrtc = ( + _code_base + + """ + +__device__ void kernel() { +} +""" +) + + +@pytest.mark.skipif(cupy.cuda.runtime.is_hip, reason="for CUDA") +class TestIncludesCompileCUDA: + def _get_cuda_archs(self): + cuda_ver = cupy.cuda.runtime.runtimeGetVersion() + to_exclude = set((int(a) for a in cupy.cuda.compiler._tegra_archs)) + if cuda_ver < 11000: + # CUDA 10.2 (Tegra excluded) + archs = (30, 35, 50, 52, 60, 61, 70, 75) + elif cuda_ver < 11010: + # CUDA 11.0 + archs = (35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80) + elif cuda_ver < 11020: + # CUDA 11.1 + archs = (35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80, 86) + else: + # CUDA 11.2+ + archs = cupy.cuda.nvrtc.getSupportedArchs() + if cuda_ver == 11020 or cuda_ver >= 12000: + to_exclude.add(69) + archs = tuple(set(archs) - to_exclude) + + return archs + + def _get_options(self): + return ( + "-std=c++14", + *cupy._core.core._get_cccl_include_options(), + "-I{}".format(cupy._core.core._get_header_dir_path()), + 
"-I{}".format(os.path.join(cupy.cuda.get_cuda_path(), "include")), + ) + + def test_nvcc(self): + options = self._get_options() + for arch in self._get_cuda_archs(): + cupy.cuda.compiler.compile_using_nvcc( + _code_nvcc, options=options, arch=arch + ) + + def test_nvrtc(self): + cuda_ver = cupy.cuda.runtime.runtimeGetVersion() + options = self._get_options() + for arch in self._get_cuda_archs(): + with mock.patch( + "cupy.cuda.compiler._get_arch_for_options_for_nvrtc", + lambda _: (f"-arch=compute_{arch}", "ptx"), + ): + cupy.cuda.compiler.compile_using_nvrtc( + _code_nvrtc, options=options + ) + if cuda_ver >= 11010: + with mock.patch( + "cupy.cuda.compiler._get_arch_for_options_for_nvrtc", + lambda _: (f"-arch=sm_{arch}", "cubin"), + ): + cupy.cuda.compiler.compile_using_nvrtc( + _code_nvrtc, options=options + ) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_internal.py b/dpnp/tests/third_party/cupy/core_tests/test_internal.py new file mode 100644 index 000000000000..5e41f3b0310a --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_internal.py @@ -0,0 +1,259 @@ +import math +import unittest + +import numpy +import pytest + +# from cupy._core import internal +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "CuPy internal functions are not supported", allow_module_level=True +) + + +class TestProd(unittest.TestCase): + + def test_empty(self): + assert internal.prod([]) == 1 + + def test_one(self): + assert internal.prod([2]) == 2 + + def test_two(self): + assert internal.prod([2, 3]) == 6 + + +class TestProdSequence(unittest.TestCase): + + def test_empty(self): + assert internal.prod_sequence(()) == 1 + + def test_one(self): + assert internal.prod_sequence((2,)) == 2 + + def test_two(self): + assert internal.prod_sequence((2, 3)) == 6 + + +class TestGetSize: + + def test_none(self): + with testing.assert_warns(DeprecationWarning): + assert internal.get_size(None) == () + + def check_collection(self, a): + assert 
internal.get_size(a) == tuple(a) + + def test_list(self): + self.check_collection([1, 2, 3]) + + def test_tuple(self): + self.check_collection((1, 2, 3)) + + def test_int(self): + assert internal.get_size(1) == (1,) + + def test_numpy_int(self): + assert internal.get_size(numpy.int32(1)) == (1,) + + def test_numpy_zero_dim_ndarray(self): + assert internal.get_size(numpy.array(1)) == (1,) + + def test_tuple_of_numpy_scalars(self): + assert internal.get_size((numpy.int32(1), numpy.array(1))) == (1, 1) + + @pytest.mark.parametrize( + "value", [True, numpy.bool_(True), numpy.array(True, dtype="?")] + ) + def test_bool(self, value): + with pytest.raises(TypeError): + internal.get_size(value) + with pytest.raises(TypeError): + internal.get_size((value, value)) + + def test_float(self): + # `internal.get_size` is not responsible to interpret values as + # integers. + assert internal.get_size(1.0) == (1.0,) + + +class TestVectorEqual(unittest.TestCase): + + def test_empty(self): + assert internal.vector_equal([], []) is True + + def test_not_equal(self): + assert internal.vector_equal([1, 2, 3], [1, 2, 0]) is False + + def test_equal(self): + assert internal.vector_equal([-1, 0, 1], [-1, 0, 1]) is True + + def test_different_size(self): + assert internal.vector_equal([1, 2, 3], [1, 2]) is False + + +class TestGetCContiguity(unittest.TestCase): + + def test_zero_in_shape(self): + assert internal.get_c_contiguity((1, 0, 1), (1, 1, 1), 3) + + def test_all_one_shape(self): + assert internal.get_c_contiguity((1, 1, 1), (1, 1, 1), 3) + + def test_normal1(self): + assert internal.get_c_contiguity((3, 4, 3), (24, 6, 2), 2) + + def test_normal2(self): + assert internal.get_c_contiguity((3, 1, 3), (6, 100, 2), 2) + + def test_normal3(self): + assert internal.get_c_contiguity((3,), (4,), 4) + + def test_normal4(self): + assert internal.get_c_contiguity((), (), 4) + + def test_normal5(self): + assert internal.get_c_contiguity((3, 1), (4, 8), 4) + + def test_no_contiguous1(self): + 
assert not internal.get_c_contiguity((3, 4, 3), (30, 6, 2), 2) + + def test_no_contiguous2(self): + assert not internal.get_c_contiguity((3, 1, 3), (24, 6, 2), 2) + + def test_no_contiguous3(self): + assert not internal.get_c_contiguity((3, 1, 3), (6, 6, 4), 2) + + +class TestInferUnknownDimension(unittest.TestCase): + + def test_known_all(self): + assert internal.infer_unknown_dimension((1, 2, 3), 6) == [1, 2, 3] + + def test_multiple_unknown(self): + with self.assertRaises(ValueError): + internal.infer_unknown_dimension((-1, 1, -1), 10) + + def test_infer(self): + assert internal.infer_unknown_dimension((-1, 2, 3), 12) == [2, 2, 3] + + +@testing.parameterize( + {"slice": (2, 8, 1), "expect": (2, 8, 1)}, + {"slice": (2, None, 1), "expect": (2, 10, 1)}, + {"slice": (2, 1, 1), "expect": (2, 2, 1)}, + {"slice": (2, -1, 1), "expect": (2, 9, 1)}, + {"slice": (None, 8, 1), "expect": (0, 8, 1)}, + {"slice": (-3, 8, 1), "expect": (7, 8, 1)}, + {"slice": (11, 8, 1), "expect": (10, 10, 1)}, + {"slice": (11, 11, 1), "expect": (10, 10, 1)}, + {"slice": (-11, 8, 1), "expect": (0, 8, 1)}, + {"slice": (-11, -11, 1), "expect": (0, 0, 1)}, + {"slice": (8, 2, -1), "expect": (8, 2, -1)}, + {"slice": (8, None, -1), "expect": (8, -1, -1)}, + {"slice": (8, 9, -1), "expect": (8, 8, -1)}, + {"slice": (8, -3, -1), "expect": (8, 7, -1)}, + {"slice": (None, 8, -1), "expect": (9, 8, -1)}, + {"slice": (-3, 6, -1), "expect": (7, 6, -1)}, + {"slice": (10, 10, -1), "expect": (9, 9, -1)}, + {"slice": (10, 8, -1), "expect": (9, 8, -1)}, + {"slice": (9, 10, -1), "expect": (9, 9, -1)}, + {"slice": (9, 9, -1), "expect": (9, 9, -1)}, + {"slice": (9, 8, -1), "expect": (9, 8, -1)}, + {"slice": (8, 8, -1), "expect": (8, 8, -1)}, + {"slice": (-9, -8, -1), "expect": (1, 1, -1)}, + {"slice": (-9, -9, -1), "expect": (1, 1, -1)}, + {"slice": (-9, -10, -1), "expect": (1, 0, -1)}, + {"slice": (-9, -11, -1), "expect": (1, -1, -1)}, + {"slice": (-9, -12, -1), "expect": (1, -1, -1)}, + {"slice": (-10, -9, -1), 
"expect": (0, 0, -1)}, + {"slice": (-10, -10, -1), "expect": (0, 0, -1)}, + {"slice": (-10, -11, -1), "expect": (0, -1, -1)}, + {"slice": (-10, -12, -1), "expect": (0, -1, -1)}, + {"slice": (-11, 8, -1), "expect": (-1, -1, -1)}, + {"slice": (-11, -9, -1), "expect": (-1, -1, -1)}, + {"slice": (-11, -10, -1), "expect": (-1, -1, -1)}, + {"slice": (-11, -11, -1), "expect": (-1, -1, -1)}, + {"slice": (-11, -12, -1), "expect": (-1, -1, -1)}, +) +class TestCompleteSlice(unittest.TestCase): + + def test_complete_slice(self): + assert internal.complete_slice(slice(*self.slice), 10) == slice( + *self.expect + ) + + +class TestCompleteSliceError(unittest.TestCase): + + def test_invalid_step_value(self): + with self.assertRaises(ValueError): + internal.complete_slice(slice(1, 1, 0), 1) + + def test_invalid_step_type(self): + with self.assertRaises(TypeError): + internal.complete_slice(slice(1, 1, (1, 2)), 1) + + def test_invalid_start_type(self): + with self.assertRaises(TypeError): + internal.complete_slice(slice((1, 2), 1, 1), 1) + with self.assertRaises(TypeError): + internal.complete_slice(slice((1, 2), 1, -1), 1) + + def test_invalid_stop_type(self): + with self.assertRaises(TypeError): + internal.complete_slice(slice((1, 2), 1, 1), 1) + with self.assertRaises(TypeError): + internal.complete_slice(slice((1, 2), 1, -1), 1) + + +@testing.parameterize( + {"x": 0, "expect": 0}, + {"x": 1, "expect": 1}, + {"x": 2, "expect": 2}, + {"x": 3, "expect": 4}, + {"x": 2**10, "expect": 2**10}, + {"x": 2**10 - 1, "expect": 2**10}, + {"x": 2**10 + 1, "expect": 2**11}, + {"x": 2**40, "expect": 2**40}, + {"x": 2**40 - 1, "expect": 2**40}, + {"x": 2**40 + 1, "expect": 2**41}, +) +class TestClp2(unittest.TestCase): + + def test_clp2(self): + assert internal.clp2(self.x) == self.expect + + +@testing.parameterize( + *testing.product( + { + "value": [ + 0.0, + 1.0, + -1.0, + 0.25, + -0.25, + 11.0, + -11.0, + 2**-15, + -(2**-15), # Denormalized Number + float("inf"), + float("-inf"), + ], + } + 
) +) +class TestConvertFloat16(unittest.TestCase): + + def test_conversion(self): + half = internal.to_float16(self.value) + assert internal.from_float16(half) == self.value + + +class TestConvertFloat16Nan(unittest.TestCase): + + def test_conversion(self): + half = internal.to_float16(float("nan")) + assert math.isnan(internal.from_float16(half)) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py index da848ec2d222..808a8a8e5f99 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py @@ -138,7 +138,9 @@ class UserNdarray(cupy.ndarray): ) ) class TestNdarrayInitStrides(unittest.TestCase): + # Check the strides given shape, itemsize and order. + @testing.numpy_cupy_equal() def test_strides(self, xp): arr = xp.ndarray(self.shape, dtype=self.dtype, order=self.order) @@ -149,6 +151,7 @@ def test_strides(self, xp): class TestNdarrayInitRaise(unittest.TestCase): + def test_unsupported_type(self): arr = numpy.ndarray((2, 3), dtype=object) with pytest.raises(TypeError): @@ -177,6 +180,7 @@ def test_excessive_ndim(self): ) @pytest.mark.skip("deepcopy() is not supported") class TestNdarrayDeepCopy(unittest.TestCase): + def _check_deepcopy(self, arr, arr2): assert arr.data is not arr2.data assert arr.shape == arr2.shape @@ -218,6 +222,7 @@ def test_deepcopy_multi_device(self): class TestNdarrayCopy: + @testing.multi_gpu(2) @testing.for_orders("CFA") def test_copy_multi_device_non_contiguous(self, order): @@ -265,6 +270,7 @@ def test_copy_multi_device_with_stream(self): class TestNdarrayShape(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_shape_set(self, xp): arr = xp.ndarray((2, 3)) @@ -298,6 +304,7 @@ def test_shape_need_copy(self): @pytest.mark.skip("CUDA interface is not supported") class TestNdarrayCudaInterface(unittest.TestCase): + def test_cuda_array_interface(self): arr = cupy.zeros(shape=(2, 3), 
dtype=cupy.float64) iface = arr.__cuda_array_interface__ @@ -428,6 +435,7 @@ def test_cuda_array_interface_stream(self): @pytest.mark.skip("CUDA interface is not supported") class TestNdarrayCudaInterfaceNoneCUDA(unittest.TestCase): + def setUp(self): self.arr = cupy.zeros(shape=(2, 3), dtype=cupy.float64) @@ -449,6 +457,7 @@ def test_cuda_array_interface_getattr(self): ) ) class TestNdarrayTake(unittest.TestCase): + shape = (3, 4, 5) @testing.for_all_dtypes() @@ -472,6 +481,7 @@ def test_take(self, xp, dtype): ) ) class TestNdarrayTakeWithInt(unittest.TestCase): + shape = (3, 4, 5) @testing.for_all_dtypes() @@ -490,6 +500,7 @@ def test_take(self, xp, dtype): ) ) class TestNdarrayTakeWithIntWithOutParam(unittest.TestCase): + shape = (3, 4, 5) @testing.for_all_dtypes() @@ -512,6 +523,7 @@ def test_take(self, xp, dtype): ) ) class TestScalaNdarrayTakeWithIntWithOutParam(unittest.TestCase): + shape = () @testing.for_all_dtypes() @@ -530,6 +542,7 @@ def test_take(self, xp, dtype): {"shape": (), "indices": (0,), "axis": 2}, ) class TestNdarrayTakeErrorAxisOverRun(unittest.TestCase): + def test_axis_overrun1(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp) @@ -547,6 +560,7 @@ def test_axis_overrun2(self): {"shape": (), "indices": (), "out_shape": (1,)}, ) class TestNdarrayTakeErrorShapeMismatch(unittest.TestCase): + def test_shape_mismatch(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp) @@ -561,6 +575,7 @@ def test_shape_mismatch(self): {"shape": (), "indices": (), "out_shape": ()}, ) class TestNdarrayTakeErrorTypeMismatch(unittest.TestCase): + def test_output_type_mismatch(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp, numpy.int32) @@ -576,6 +591,7 @@ def test_output_type_mismatch(self): {"shape": (3, 0), "indices": (2,), "axis": 0}, ) class TestZeroSizedNdarrayTake(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_output_type_mismatch(self, xp): a = 
testing.shaped_arange(self.shape, xp, numpy.int32) @@ -588,6 +604,7 @@ def test_output_type_mismatch(self, xp): {"shape": (0,), "indices": (1, 1)}, ) class TestZeroSizedNdarrayTakeIndexError(unittest.TestCase): + def test_output_type_mismatch(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp, numpy.int32) @@ -597,6 +614,7 @@ def test_output_type_mismatch(self): class TestSize(unittest.TestCase): + @testing.numpy_cupy_equal() def test_size_without_axis(self, xp): x = testing.shaped_arange((3, 4, 5), xp, numpy.int32) @@ -638,6 +656,7 @@ def test_size_zero_dim_array_with_axis(self): @pytest.mark.skip("python interface is not supported") class TestPythonInterface(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_bytes_tobytes(self, xp, dtype): @@ -674,6 +693,7 @@ def test_format(self, xp): @pytest.mark.skip("implicit conversation to numpy does not raise an exception") class TestNdarrayImplicitConversion(unittest.TestCase): + def test_array(self): a = testing.shaped_arange((3, 4, 5), cupy, numpy.int64) with pytest.raises(TypeError): @@ -681,6 +701,7 @@ def test_array(self): class C(cupy.ndarray): + def __new__(cls, *args, info=None, **kwargs): obj = super().__new__(cls, *args, **kwargs) obj.info = info @@ -694,6 +715,7 @@ def __array_finalize__(self, obj): @pytest.mark.skip("SAT-7168: explicit constructor call is not supported") class TestNdarraySubclass: + def test_explicit_constructor_call(self): a = C([0, 1, 2, 3], info="information") assert type(a) is C diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_adv_indexing.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_adv_indexing.py new file mode 100644 index 000000000000..d9451cb70a57 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_adv_indexing.py @@ -0,0 +1,947 @@ +import itertools + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + + +def perm(iterable): + 
return list(itertools.permutations(iterable)) + + +@testing.parameterize( + *testing.product( + { + "shape": [(4, 4, 4)], + "indexes": ( + perm(([1, 0], slice(None))) + + perm(([1, 0], Ellipsis)) + + perm(([1, 2], None, slice(None))) + + + # perm(([1, 0], 1, slice(None))) + # dpctl-1911 + perm(([1, 2], slice(0, 2), slice(None))) + + perm((1, [1, 2], 1)) + + perm(([[1, -1], [0, 3]], slice(None), slice(None))) + + + # perm(([1, 0], [3, 2], slice(None))) + # dpctl-1912 + # perm((slice(0, 3, 2), [1, 2], [1, 0])) + # dpctl-1912 + perm(([1, 0], [2, 1], [3, 1])) + + + # perm(([1, 0], 1, [3, 1])) + # dpctl-1912 + # perm(([1, 2], [[1, 0], [0, 1], [-1, 1]], slice(None))) + # dpctl-1912 + # perm((None, [1, 2], [1, 0])) + # dpctl-1912 + perm((numpy.array(0), numpy.array(-1))) + + perm((numpy.array(0), None)) + + perm((1, numpy.array(2), slice(None))) + ), + } + ) +) +class TestArrayAdvancedIndexingGetitemPerm: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_getitem(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + indexes = self.indexes + if xp is cupy: + # dpnp does not support a list in advanced indexing + indexes = tuple( + cupy.asarray(x) if isinstance(x, list) else x for x in indexes + ) + return a[indexes] + + +@testing.parameterize( + {"shape": (2, 3, 4), "indexes": numpy.array(-1)}, + {"shape": (2, 3, 4), "indexes": (None, [1, 0], [0, 2], slice(None))}, + # {'shape': (2, 3, 4), 'indexes': (None, [0, 1], None, [2, 1], slice(None))}, # dpctl-1912 + {"shape": (2, 3, 4), "indexes": numpy.array([1, 0])}, + {"shape": (2, 3, 4), "indexes": [1]}, + {"shape": (2, 3, 4), "indexes": [1, 1]}, + {"shape": (2, 3, 4), "indexes": [1, -1]}, + # {'shape': (2, 3, 4), 'indexes': ([0, 1], slice(None), [[2, 1], [3, 1]])}, # dpctl-1912 + # mask + {"shape": (10,), "indexes": (numpy.random.choice([False, True], (10,)),)}, + {"shape": (2, 3, 4), "indexes": (1, numpy.array([True, False, True]))}, + {"shape": (2, 3, 4), "indexes": 
(numpy.array([True, False]), 1)}, + { + "shape": (2, 3, 4), + "indexes": (slice(None), 2, numpy.array([True, False, True, False])), + }, + {"shape": (2, 3, 4), "indexes": (slice(None), 2, False)}, + { + "shape": (2, 3, 4), + "indexes": (numpy.random.choice([False, True], (2, 3, 4)),), + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.array([True, False, True])), + }, + { + "shape": (2, 3, 4), + "indexes": ( + slice(None), + slice(None), + numpy.array([True, False, False, True]), + ), + }, + { + "shape": (2, 3, 4), + "indexes": (1, 2, numpy.array([True, False, False, True])), + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.random.choice([False, True], (3, 4))), + }, + {"shape": (2, 3, 4), "indexes": numpy.random.choice([False, True], (2, 3))}, + # {'shape': (2, 3, 4), dpctl-1911 + # 'indexes': (1, None, numpy.array([True, False, True]))}, + # empty arrays + {"shape": (2, 3, 4), "indexes": []}, + {"shape": (2, 3, 4), "indexes": numpy.array([], dtype=numpy.int32)}, + {"shape": (2, 3, 4), "indexes": numpy.array([[]], dtype=numpy.int32)}, + {"shape": (2, 3, 4), "indexes": (slice(None), [])}, + {"shape": (2, 3, 4), "indexes": ([], [])}, + {"shape": (2, 3, 4), "indexes": ([[]],)}, + {"shape": (2, 3, 4), "indexes": numpy.array([], dtype=numpy.bool_)}, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.array([], dtype=numpy.bool_)), + }, + {"shape": (2, 3, 4), "indexes": numpy.array([[], []], dtype=numpy.bool_)}, + {"shape": (2, 3, 4), "indexes": numpy.empty((0, 0, 4), bool)}, + # multiple masks + # {'shape': (2, 3, 4), 'indexes': (True, [True, False])}, dpctl-1911 + # {'shape': (2, 3, 4), 'indexes': (False, [True, False])}, dpctl-1911 + # {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2))}, dpctl-1911 + # {'shape': (2, 3, 4), 'indexes': (False, [[1]], slice(1, 2))}, dpctl-1911 + # {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2), True)}, dpctl-1911 + # {'shape': (2, 3, 4), 'indexes': (True, [[1]], slice(1, 2), False)}, 
dpctl-1911 + { + "shape": (2, 3, 4), + "indexes": ( + Ellipsis, + [[1, 1, -3], [0, 2, 2]], + [True, False, True, True], + ), + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.empty((0, 3), bool), numpy.empty(0, bool)), + }, + # zero-dim and zero-sized arrays + {"shape": (), "indexes": Ellipsis}, + {"shape": (), "indexes": ()}, + {"shape": (), "indexes": None}, + {"shape": (), "indexes": True}, + {"shape": (), "indexes": (True,)}, + # {'shape': (), 'indexes': (False, True, True)}, dpctl-1911 + {"shape": (), "indexes": numpy.ones((), dtype=numpy.bool_)}, + {"shape": (), "indexes": numpy.zeros((), dtype=numpy.bool_)}, + {"shape": (0,), "indexes": None}, + {"shape": (0,), "indexes": ()}, + {"shape": (2, 0), "indexes": ([1],)}, + {"shape": (0, 3), "indexes": (slice(None), [1])}, + {"shape": (0,), "indexes": True}, + {"shape": (0,), "indexes": (True,)}, + # {'shape': (0,), 'indexes': (False, True, True)}, dpctl-1911 + {"shape": (0,), "indexes": numpy.ones((), dtype=numpy.bool_)}, + {"shape": (0,), "indexes": numpy.zeros((), dtype=numpy.bool_)}, + # ellipsis + {"shape": (2, 3, 4), "indexes": (1, Ellipsis, 2)}, + # issue #1512 + {"shape": (2, 3, 4), "indexes": (Ellipsis, numpy.array(False))}, + {"shape": (2, 3, 4), "indexes": (Ellipsis, numpy.ones((3, 4), bool))}, + # issue #4799 + # {'shape': (3, 4, 5), # dpctl-1912 + # 'indexes': (slice(None), [0, 1], Ellipsis, [0, 1])}, + # {'shape': (2, 3, 4), # dpctl-1912 + # 'indexes': (slice(None), [1, 0], Ellipsis, numpy.ones((5, 2), int))}, + _ids=False, # Do not generate ids from randomly generated params +) +class TestArrayAdvancedIndexingGetitemParametrized: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_getitem(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + indexes = self.indexes + if xp is cupy: + + def _cast_to_ndarray(x): + if isinstance(x, list) and ( + len(x) == 0 + or len(x) == 1 + and isinstance(x[0], list) + and len(x[0]) == 0 + ): + return cupy.asarray(x, 
dtype=numpy.intp) + return cupy.asarray(x) + + if isinstance(indexes, tuple): + is_valid = lambda x: not isinstance(x, (list, numpy.ndarray)) + indexes = tuple( + x if is_valid(x) else _cast_to_ndarray(x) for x in indexes + ) + elif isinstance(indexes, (list, numpy.ndarray)): + indexes = _cast_to_ndarray(indexes) + return a[indexes] + + +@testing.parameterize( + # empty arrays (list indexes) + {"shape": (2, 3, 4), "indexes": [[]]}, + {"shape": (2, 3, 4), "indexes": [[[]]]}, + {"shape": (2, 3, 4), "indexes": [[[[]]]]}, + {"shape": (2, 3, 4, 5), "indexes": [[[[]]]]}, + {"shape": (2, 3, 4, 5), "indexes": [[[[[]]]]]}, + # list indexes + {"shape": (2, 3, 4), "indexes": [[1]]}, + {"shape": (2, 3, 4), "indexes": [[1, 1]]}, + {"shape": (2, 3, 4), "indexes": [[1], [1]]}, +) +@testing.with_requires("numpy>=1.23") +class TestArrayAdvancedIndexingGetitemParametrized2: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_getitem(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + indexes = self.indexes + if xp is cupy: + # dpnp does not support a list in advanced indexing + indexes = cupy.asarray(indexes, dtype=numpy.intp) + return a[indexes] + + +@testing.parameterize( + # list indexes + {"shape": (2, 3, 4), "indexes": [[1, 1], 1]}, + {"shape": (2, 3, 4), "indexes": [[1], slice(1, 2)]}, + {"shape": (2, 3, 4), "indexes": [[[1]], slice(1, 2)]}, +) +@pytest.mark.skip("no support of a list in advanced indexing") +@testing.with_requires("numpy>=1.24") +class TestArrayAdvancedIndexingGetitemParametrizedValueError: + + @testing.for_all_dtypes() + def test_adv_getitem(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange(self.shape, xp, dtype) + with pytest.raises(ValueError): + a[self.indexes] + + +@testing.parameterize( + { + "shape": (2, 3, 4), + "transpose": (1, 2, 0), + "indexes": (slice(None), [1, 0]), + }, + { + "shape": (2, 3, 4), + "transpose": (1, 0, 2), + "indexes": (None, [1, 2], [0, -1]), + }, +) +class 
TestArrayAdvancedIndexingGetitemParametrizedTransp: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_getitem(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + if self.transpose: + a = a.transpose(self.transpose) + + indexes = self.indexes + if xp is cupy: + # dpnp does not support a list in advanced indexing + indexes = tuple( + cupy.asarray(x) if isinstance(x, list) else x for x in indexes + ) + return a[indexes] + + +class TestArrayAdvancedIndexingGetitemCupyIndices: + + shape = (2, 3, 4) + + def test_adv_getitem_cupy_indices1(self): + a = cupy.zeros(self.shape) + index = cupy.array([1, 0]) + original_index = index.copy() + b = a[index] + b_cpu = a.asnumpy()[index.asnumpy()] + testing.assert_array_equal(b, b_cpu) + testing.assert_array_equal(original_index, index) + + def test_adv_getitem_cupy_indices2(self): + a = cupy.zeros(self.shape) + index = cupy.array([1, 0]) + original_index = index.copy() + b = a[(slice(None), index)] + b_cpu = a.asnumpy()[(slice(None), index.asnumpy())] + testing.assert_array_equal(b, b_cpu) + testing.assert_array_equal(original_index, index) + + def test_adv_getitem_cupy_indices3(self): + a = cupy.zeros(self.shape) + index = cupy.array([True, False]) + original_index = index.copy() + b = a[index] + b_cpu = a.asnumpy()[index.asnumpy()] + testing.assert_array_equal(b, b_cpu) + testing.assert_array_equal(original_index, index) + + def test_adv_getitem_cupy_indices4(self): + a = cupy.zeros(self.shape) + index = cupy.array([4, -5]) + original_index = index.copy() + b = a[index] + b_cpu = a.asnumpy()[index.asnumpy() % self.shape[1]] + testing.assert_array_equal(b, b_cpu) + testing.assert_array_equal(original_index, index) + + def test_adv_getitem_cupy_indices5(self): + a = cupy.zeros(self.shape) + index = cupy.array([4, -5]) + original_index = index.copy() + b = a[cupy.array([1, 0]), index] + b_cpu = a.asnumpy()[[1, 0], index.asnumpy() % self.shape[1]] + testing.assert_array_equal(b, 
b_cpu) + testing.assert_array_equal(original_index, index) + + +@testing.parameterize( + { + "shape": (2**3 + 1, 2**4), + "indexes": ( + numpy.array([2**3], dtype=numpy.int8), + numpy.array([1], dtype=numpy.int8), + ), + }, + { + "shape": (2**4 + 1, 2**4), + "indexes": ( + numpy.array([2**4], dtype=numpy.uint8), + numpy.array([1], dtype=numpy.uint8), + ), + }, + { + "shape": (2**7 + 1, 2**8), + "indexes": ( + numpy.array([2**7], dtype=numpy.int16), + numpy.array([1], dtype=numpy.int16), + ), + }, + { + "shape": (2**8 + 1, 2**8), + "indexes": ( + numpy.array([2**8], dtype=numpy.uint16), + numpy.array([1], dtype=numpy.uint16), + ), + }, + { + "shape": (2**7 + 1, 2**8), + "indexes": ( + numpy.array([2**7], dtype=numpy.int16), + numpy.array([1], dtype=numpy.int32), + ), + }, + { + "shape": (2**7 + 1, 2**8), + "indexes": ( + numpy.array([2**7], dtype=numpy.int16), + numpy.array([1], dtype=numpy.int8), + ), + }, + # Three-dimensional case + # dpctl-1912 + # {'shape': (2**3 + 1, 3, 2**4), 'indexes': ( + # numpy.array([2**3], dtype=numpy.int8), + # slice(None), + # numpy.array([1], dtype=numpy.int8))}, +) +class TestArrayAdvancedIndexingOverflow: + + def test_getitem(self): + a = cupy.arange(numpy.prod(self.shape)).reshape(self.shape) + indexes_gpu = [] + for s in self.indexes: + if isinstance(s, numpy.ndarray): + s = cupy.array(s) + indexes_gpu.append(s) + indexes_gpu = tuple(indexes_gpu) + b = a[indexes_gpu] + b_cpu = a.asnumpy()[self.indexes] + testing.assert_array_equal(b, b_cpu) + + def test_setitem(self): + a_cpu = numpy.arange(numpy.prod(self.shape)).reshape(self.shape) + a = cupy.array(a_cpu) + indexes_gpu = [] + for s in self.indexes: + if isinstance(s, numpy.ndarray): + s = cupy.array(s) + indexes_gpu.append(s) + indexes_gpu = tuple(indexes_gpu) + a[indexes_gpu] = -1 + a_cpu[self.indexes] = -1 + testing.assert_array_equal(a, a_cpu) + + +@testing.parameterize( + {"shape": (), "indexes": (-1,)}, + {"shape": (), "indexes": (0,)}, + {"shape": (), "indexes": (1,)}, + 
{"shape": (), "indexes": ([0],)}, + {"shape": (), "indexes": (numpy.array([0]),)}, + {"shape": (), "indexes": (numpy.array(0),)}, + {"shape": (), "indexes": numpy.array([True])}, + {"shape": (), "indexes": numpy.array([False, True, True])}, + {"shape": (), "indexes": ([False],)}, + {"shape": (0,), "indexes": (-1,)}, + {"shape": (0,), "indexes": (0,)}, + {"shape": (0,), "indexes": (1,)}, + {"shape": (0,), "indexes": ([0],)}, + {"shape": (0,), "indexes": (numpy.array([0]),)}, + {"shape": (0,), "indexes": (numpy.array(0),)}, + {"shape": (0,), "indexes": numpy.array([True])}, + {"shape": (0,), "indexes": numpy.array([False, True, True])}, + {"shape": (0, 1), "indexes": (0, Ellipsis)}, + {"shape": (2, 3), "indexes": (slice(None), [1, 2], slice(None))}, + {"shape": (2, 3), "indexes": numpy.array([], dtype=numpy.float64)}, + {"shape": (3, 4), "indexes": ([1, 0], [True, True])}, + { + "shape": (2, 3, 4), + "indexes": ([True, True], [[True, True, False, False]]), + }, + {"shape": (2, 3, 4), "indexes": ([True, True], [[True], [True], [False]])}, + {"shape": (2, 3, 4), "indexes": numpy.empty((0, 1), bool)}, + { + "shape": (2, 3, 4), + "indexes": (numpy.empty(0, bool), numpy.empty((0, 2), bool)), + }, +) +class TestArrayInvalidIndexAdvGetitem: + + def test_invalid_adv_getitem(self): + for xp in (numpy, cupy): + a = testing.shaped_arange(self.shape, xp) + with pytest.raises(IndexError): + a[self.indexes] + + +@testing.parameterize( + {"shape": (0,), "indexes": ([False],)}, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.random.choice([False, True], (3, 1))), + }, + {"shape": (2, 3, 4), "indexes": numpy.random.choice([False, True], (1, 3))}, + _ids=False, # Do not generate ids from randomly generated params +) +class TestArrayInvalidIndexAdvGetitem2: + + def test_invalid_adv_getitem(self): + for xp in (numpy, cupy): + a = testing.shaped_arange(self.shape, xp) + with pytest.raises(IndexError): + a[self.indexes] + + +@testing.parameterize( + {"shape": (2, 3, 4), 
"indexes": [1, [1, [1]]]}, +) +@pytest.mark.skip("no support of a list in advanced indexing") +@testing.with_requires("numpy>=1.24") +class TestArrayInvalidValueAdvGetitem: + + def test_invalid_adv_getitem(self): + for xp in (numpy, cupy): + a = testing.shaped_arange(self.shape, xp) + with pytest.raises(ValueError): + a[self.indexes] + + +@testing.parameterize( + # array only + {"shape": (2, 3, 4), "indexes": numpy.array(-1), "value": 1}, + {"shape": (2, 3, 4), "indexes": numpy.array([1, 0]), "value": 1}, + {"shape": (2, 3, 4), "indexes": [1, 0], "value": 1}, + {"shape": (2, 3, 4), "indexes": [1, -1], "value": 1}, + {"shape": (2, 3, 4), "indexes": (slice(None), [1, 2]), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": ( + slice(None), + [[1, 2], [0, -1]], + ), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), slice(None), [[1, 2], [0, -1]]), + "value": 1, + }, + # slice and array + { + "shape": (2, 3, 4), + "indexes": (slice(None), slice(1, 2), [[1, 2], [0, -1]]), + "value": 1, + }, + # None and array + {"shape": (2, 3, 4), "indexes": (None, [1, -1]), "value": 1}, + {"shape": (2, 3, 4), "indexes": (None, [1, -1], None), "value": 1}, + {"shape": (2, 3, 4), "indexes": (None, None, None, [1, -1]), "value": 1}, + # None, slice and array + {"shape": (2, 3, 4), "indexes": (slice(0, 1), None, [1, -1]), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": (slice(0, 1), slice(1, 2), [1, -1]), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (slice(0, 1), None, slice(1, 2), [1, -1]), + "value": 1, + }, + # mask + {"shape": (2, 3, 4), "indexes": numpy.array([True, False]), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": (1, numpy.array([True, False, True])), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.array([True, False]), 1), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.array([True, False, True])), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), 2, 
numpy.array([True, False, True, False])), + "value": 1, + }, + {"shape": (2, 3, 4), "indexes": (slice(None), 2, False), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": ( + slice(None), + slice(None), + numpy.random.choice([False, True], (4,)), + ), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.random.choice([False, True], (2, 3)),), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": ( + slice(None), + numpy.random.choice([False, True], (3, 4)), + ), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.random.choice([False, True], (2, 3, 4)),), + "value": 1, + }, + # {'shape': (2, 3, 4), + # 'indexes': (1, None, numpy.array([True, False, True])), 'value': 1}, dpctl-1911 + # multiple arrays + {"shape": (2, 3, 4), "indexes": ([0, -1], [1, -1]), "value": 1}, + {"shape": (2, 3, 4), "indexes": ([0, -1], [1, -1], [2, 1]), "value": 1}, + {"shape": (2, 3, 4), "indexes": ([0, -1], 1), "value": 1}, + # {'shape': (2, 3, 4), 'indexes': ([0, -1], slice(None), [1, -1]), + # 'value': 1}, #dpctl-1912 + {"shape": (2, 3, 4), "indexes": ([0, -1], 1, 2), "value": 1}, + # {'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [[2, 0], [3, 1]]), + # 'value': 1}, #dpctl-1912 + # multiple arrays and basic indexing + # {'shape': (2, 3, 4), 'indexes': ([0, -1], None, [1, 0]), 'value': 1}, #dpctl-1912 + # {'shape': (2, 3, 4), 'indexes': ([0, -1], slice(0, 2), [1, 0]), + # 'value': 1}, #dpctl-1912 + # {'shape': (2, 3, 4), 'indexes': ([0, -1], None, slice(0, 2), [1, 0]), + # 'value': 1}, #dpctl-1912 + { + "shape": (1, 1, 2, 3, 4), + "indexes": (None, slice(None), slice(None), [1, 0], [2, -1], 1), + "value": 1, + }, + # {'shape': (1, 1, 2, 3, 4), + # 'indexes': (None, slice(None), 0, [1, 0], slice(0, 2, 2), [2, -1]), + # 'value': 1}, #dpctl-1912 + { + "shape": (2, 3, 4), + "indexes": (slice(None), [0, -1], [[1, 0], [0, 1], [-1, 1]]), + "value": 1, + }, + # empty arrays + {"shape": (2, 3, 4), "indexes": [], "value": 1}, + {"shape": (2, 3, 4), "indexes": 
[], "value": numpy.array([1, 1, 1, 1])}, + { + "shape": (2, 3, 4), + "indexes": [], + "value": numpy.random.uniform(size=(3, 4)), + }, + { + "shape": (2, 3, 4), + "indexes": numpy.array([], dtype=numpy.int32), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": numpy.array([[]], dtype=numpy.int32), + "value": numpy.random.uniform(size=(3, 4)), + }, + {"shape": (2, 3, 4), "indexes": (slice(None), []), "value": 1}, + {"shape": (2, 3, 4), "indexes": ([], []), "value": 1}, + # {'shape': (2, 3, 4), 'indexes': numpy.array([], dtype=numpy.bool_), + # 'value': 1}, #dpctl-1913 + # {'shape': (2, 3, 4), + # 'indexes': (slice(None), numpy.array([], dtype=numpy.bool_)), + # 'value': 1}, #dpctl-1913 + # {'shape': (2, 3, 4), 'indexes': numpy.array([[], []], dtype=numpy.bool_), + # 'value': numpy.random.uniform(size=(4,))}, #dpctl-1913 + # {'shape': (2, 3, 4), 'indexes': numpy.empty((0, 0, 4), bool), 'value': 1}, #dpctl-1913 + # multiple masks + {"shape": (2, 3, 4), "indexes": (True, [True, False]), "value": 1}, + {"shape": (2, 3, 4), "indexes": (False, [True, False]), "value": 1}, + {"shape": (2, 3, 4), "indexes": (True, [[1]], slice(1, 2)), "value": 1}, + {"shape": (2, 3, 4), "indexes": (False, [[1]], slice(1, 2)), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": (True, [[1]], slice(1, 2), True), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": (True, [[1]], slice(1, 2), False), + "value": 1, + }, + { + "shape": (2, 3, 4), + "indexes": ( + Ellipsis, + [[1, 1, -3], [0, 2, 2]], + [True, False, True, True], + ), + "value": [[1, 2, 3], [4, 5, 6]], + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.empty((0, 3), bool), numpy.empty(0, bool)), + "value": 1, + }, + # zero-dim and zero-sized arrays + {"shape": (), "indexes": Ellipsis, "value": 1}, + {"shape": (), "indexes": (), "value": 1}, + {"shape": (), "indexes": None, "value": 1}, + {"shape": (), "indexes": True, "value": 1}, + {"shape": (), "indexes": (True,), "value": 1}, + {"shape": (), "indexes": (False, True, 
True), "value": 1}, + {"shape": (), "indexes": numpy.ones((), dtype=numpy.bool_), "value": 1}, + {"shape": (), "indexes": numpy.zeros((), dtype=numpy.bool_), "value": 1}, + {"shape": (0,), "indexes": None, "value": 1}, + {"shape": (0,), "indexes": (), "value": 1}, + {"shape": (0,), "indexes": True, "value": 1}, + {"shape": (0,), "indexes": (True,), "value": 1}, + {"shape": (0,), "indexes": (False, True, True), "value": 1}, + {"shape": (0,), "indexes": numpy.ones((), dtype=numpy.bool_), "value": 1}, + {"shape": (0,), "indexes": numpy.zeros((), dtype=numpy.bool_), "value": 1}, + # ellipsis + {"shape": (2, 3, 4), "indexes": (1, Ellipsis, 2), "value": 1}, + # issue #1512 + {"shape": (2, 3, 4), "indexes": (Ellipsis, numpy.array(False)), "value": 1}, + { + "shape": (2, 3, 4), + "indexes": (Ellipsis, numpy.ones((3, 4), bool)), + "value": 1, + }, + # issue #4799 + # {'shape': (3, 4, 5), #dpctl-1912 + # 'indexes': (slice(None), [0, 1], Ellipsis, [0, 1]), 'value': 1}, + # {'shape': (2, 3, 4), #dpctl-1912 + # 'indexes': (slice(None), [1, 0], Ellipsis, numpy.ones((5, 2), int)), + # 'value': 1}, + _ids=False, # Do not generate ids from randomly generated params +) +class TestArrayAdvancedIndexingSetitemScalarValue: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_setitem(self, xp, dtype): + a = xp.zeros(self.shape, dtype=dtype) + indexes = self.indexes + if xp is cupy: + + def _cast_to_ndarray(x): + if isinstance(x, list) and len(x) == 0: + return cupy.asarray(x, dtype=numpy.intp) + return cupy.asarray(x) + + if isinstance(indexes, tuple): + is_valid = lambda x: not isinstance(x, (list, numpy.ndarray)) + indexes = tuple( + x if is_valid(x) else _cast_to_ndarray(x) for x in indexes + ) + elif isinstance(indexes, (list, numpy.ndarray)): + indexes = _cast_to_ndarray(indexes) + a[indexes] = self.value + return a + + +@testing.parameterize( + # empty arrays (list indexes) + {"shape": (2, 3, 4), "indexes": [[]], "value": 1}, + {"shape": (2, 3, 4), 
"indexes": [[[]]], "value": 1}, + {"shape": (2, 3, 4), "indexes": [[[[]]]], "value": 1}, + {"shape": (2, 3, 4, 5), "indexes": [[[[]]]], "value": 1}, + {"shape": (2, 3, 4, 5), "indexes": [[[[[]]]]], "value": 1}, + # list indexes + {"shape": (2, 3, 4), "indexes": [[1]], "value": 1}, + {"shape": (2, 3, 4), "indexes": [[1, 0]], "value": 1}, + {"shape": (2, 3, 4), "indexes": [[1], [0]], "value": 1}, +) +@testing.with_requires("numpy>=1.23") +class TestArrayAdvancedIndexingSetitemScalarValue2: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_setitem(self, xp, dtype): + a = xp.zeros(self.shape, dtype=dtype) + indexes = self.indexes + if xp is cupy: + # dpnp does not support a list in advanced indexing + indexes = cupy.asarray(indexes, dtype=numpy.intp) + a[indexes] = self.value + return a + + +@testing.parameterize( + # zero-dim and zero-sized arrays + {"shape": (), "indexes": numpy.array([True]), "value": 1}, + {"shape": (), "indexes": numpy.array([False, True, True]), "value": 1}, + {"shape": (0,), "indexes": numpy.array([True]), "value": 1}, + {"shape": (0,), "indexes": numpy.array([False, True, True]), "value": 1}, +) +class TestArrayAdvancedIndexingSetitemScalarValueIndexError: + + def test_adv_setitem(self): + for xp in (numpy, cupy): + a = xp.zeros(self.shape) + with pytest.raises(IndexError): + a[self.indexes] = self.value + + +@testing.parameterize( + # list indexes + {"shape": (2, 3, 4), "indexes": [[1, 0], 2], "value": 1}, + {"shape": (2, 3, 4), "indexes": [[1], slice(1, 2)], "value": 1}, + {"shape": (2, 3, 4), "indexes": [[[1]], slice(1, 2)], "value": 1}, +) +@pytest.mark.skip("no support of a list in advanced indexing") +@testing.with_requires("numpy>=1.24") +class TestArrayAdvancedIndexingSetitemScalarValueValueError2: + + @testing.for_all_dtypes() + def test_adv_setitem(self, dtype): + for xp in (numpy, cupy): + a = xp.zeros(self.shape, dtype=dtype) + with pytest.raises(ValueError): + a[self.indexes] = self.value + + 
+@testing.parameterize( + {"shape": (2, 3, 4), "indexes": numpy.array(1), "value": numpy.array([1])}, + { + "shape": (2, 3, 4), + "indexes": numpy.array(1), + "value": numpy.array([1, 2, 3, 4]), + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), [0, -1]), + "value": numpy.arange(2 * 2 * 4).reshape(2, 2, 4), + }, + { + "shape": (2, 5, 4), + "indexes": (slice(None), [[0, 2], [1, -1]]), + "value": numpy.arange(2 * 2 * 2 * 4).reshape(2, 2, 2, 4), + }, + # mask + { + "shape": (2, 3, 4), + "indexes": numpy.random.choice([False, True], (2, 3)), + "value": numpy.arange(4), + }, + { + "shape": (2, 3, 4), + "indexes": (slice(None), numpy.array([True, False, True])), + "value": numpy.arange(2 * 2 * 4).reshape(2, 2, 4), + }, + { + "shape": (2, 3, 4), + "indexes": (numpy.array([[True, False, False], [False, True, True]]),), + "value": numpy.arange(3 * 4).reshape(3, 4), + }, + { + "shape": (2, 2, 2), + "indexes": ( + slice(None), + numpy.array([[True, False], [False, True]]), + ), + "value": numpy.arange(2 * 2).reshape(2, 2), + }, + { + "shape": (2, 2, 2), + "indexes": ( + numpy.array( + [[[True, False], [True, False]], [[True, True], [False, False]]] + ), + ), + "value": numpy.arange(4), + }, + { + "shape": (5,), + "indexes": numpy.array([True, False, False, True, True]), + "value": numpy.arange(3), + }, + # multiple arrays + { + "shape": (2, 3, 4), + "indexes": ([1, 0], [2, 1]), + "value": numpy.arange(2 * 4).reshape(2, 4), + }, + # {'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [2, 1]), + # 'value': numpy.arange(2 * 3).reshape(2, 3)}, #dpctl-1912 + # {'shape': (2, 3, 4), 'indexes': ([1, 0], slice(None), [[2, 0], [3, 1]]), + # 'value': numpy.arange(2 * 2 * 3).reshape(2, 2, 3)}, #dpctl-1912 + # {'shape': (2, 3, 4), #dpctl-1912 + # 'indexes': ([[1, 0], [1, 0]], slice(None), [[2, 0], [3, 1]]), + # 'value': numpy.arange(2 * 2 * 3).reshape(2, 2, 3)}, + # {'shape': (2, 3, 4), #dpctl-1911 + # 'indexes': (1, slice(None), [[2, 0], [3, 1]]), + # 'value': numpy.arange(2 * 2 
* 3).reshape(2, 2, 3)}, + # list indexes + { + "shape": (2, 3, 4), + "indexes": [1], + "value": numpy.arange(3 * 4).reshape(3, 4), + }, + _ids=False, # Do not generate ids from randomly generated params +) +class TestArrayAdvancedIndexingVectorValue: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_adv_setitem(self, xp, dtype): + a = xp.zeros(self.shape, dtype=dtype) + indexes = self.indexes + if xp is cupy: + if isinstance(indexes, tuple): + is_valid = lambda x: not isinstance(x, (list, numpy.ndarray)) + indexes = tuple( + x if is_valid(x) else cupy.asarray(x) for x in indexes + ) + elif isinstance(indexes, (list, numpy.ndarray)): + indexes = cupy.asarray(indexes) + a[indexes] = self.value.astype(a.dtype) + return a + + +class TestArrayAdvancedIndexingSetitemCupyIndices: + + shape = (2, 3) + + def test_cupy_indices_integer_array_1(self): + a = cupy.zeros(self.shape) + index = cupy.array([0, 1]) + original_index = index.copy() + a[:, index] = cupy.array(1.0) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]]) + ) + testing.assert_array_equal(index, original_index) + + @pytest.mark.skip("due to dpctl-1911") + def test_cupy_indices_integer_array_2(self): + a = cupy.zeros(self.shape) + index = cupy.array([3, -5]) + original_index = index.copy() + a[:, index] = cupy.array(1.0) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]]) + ) + testing.assert_array_equal(index, original_index) + + @pytest.mark.skip("due to dpctl-1911") + def test_cupy_indices_integer_array_3(self): + a = cupy.zeros(self.shape) + index = cupy.array([3, -5]) + original_index = index.copy() + a[cupy.array([1, 1]), index] = cupy.array(1.0) + testing.assert_array_equal( + a, cupy.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0]]) + ) + testing.assert_array_equal(index, original_index) + + def test_cupy_indices_boolean_array(self): + a = cupy.zeros(self.shape) + index = cupy.array([True, False]) + original_index = 
index.copy() + a[index] = cupy.array(1.0) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]]) + ) + testing.assert_array_almost_equal(original_index, index) + + +class TestArrayAdvancedIndexingSetitemDifferentDtypes: + + @testing.for_all_dtypes_combination( + names=["src_dtype", "dst_dtype"], no_complex=True + ) + @testing.numpy_cupy_array_equal() + def test_differnt_dtypes(self, xp, src_dtype, dst_dtype): + shape = (2, 3) + a = xp.zeros(shape, dtype=src_dtype) + indexes = xp.array([0, 1]) + a[:, indexes] = xp.array(1, dtype=dst_dtype) + return a + + @testing.for_all_dtypes_combination( + names=["src_dtype", "dst_dtype"], no_complex=True + ) + @testing.numpy_cupy_array_equal() + def test_differnt_dtypes_mask(self, xp, src_dtype, dst_dtype): + shape = (2, 3) + a = xp.zeros(shape, dtype=src_dtype) + indexes = xp.array([True, False]) + a[indexes] = xp.array(1, dtype=dst_dtype) + return a + + +class TestArrayAdvancedIndexingSetitemTranspose: + + @pytest.mark.skip("due to dpctl-1912") + @testing.numpy_cupy_array_equal() + def test_adv_setitem_transp(self, xp): + shape = (2, 3, 4) + a = xp.zeros(shape).transpose(0, 2, 1) + slices = (xp.array([1, 0]), slice(None), xp.array([2, 1])) + a[slices] = 1 + return a diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py index 4d7ee0eb0034..282c38b72e66 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_complex_ops.py @@ -9,6 +9,7 @@ class TestConj(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal() def test_conj(self, xp, dtype): @@ -20,7 +21,7 @@ def test_conj(self, xp, dtype): def test_conj_pass(self, xp, dtype): x = testing.shaped_arange((2, 3), xp, dtype) y = x.conj() - self.assertIs(x, y) + assert x is y return y @testing.for_all_dtypes() @@ -34,11 +35,12 @@ def 
test_conjugate(self, xp, dtype): def test_conjugate_pass(self, xp, dtype): x = testing.shaped_arange((2, 3), xp, dtype) y = x.conjugate() - self.assertIs(x, y) + assert x is y return y class TestAngle(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal(type_check=has_support_aspect64()) def test_angle(self, xp, dtype): @@ -47,6 +49,7 @@ def test_angle(self, xp, dtype): class TestRealImag(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_almost_equal(accept_error=False) def test_real(self, xp, dtype): @@ -154,11 +157,12 @@ def test_imag_inplace(self, dtype): class TestScalarConversion(unittest.TestCase): + @testing.for_all_dtypes() def test_scalar_conversion(self, dtype): scalar = 1 + 1j if numpy.dtype(dtype).kind == "c" else 1 - x_1d = cupy.array([scalar]).astype(dtype) - self.assertEqual(complex(x_1d), scalar) + x_1d = cupy.array(scalar).astype(dtype) + assert complex(x_1d) == scalar x_0d = x_1d.reshape(()) - self.assertEqual(complex(x_0d), scalar) + assert complex(x_0d) == scalar diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_contiguity.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_contiguity.py new file mode 100644 index 000000000000..7331105f3b7b --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_contiguity.py @@ -0,0 +1,16 @@ +import unittest + +from dpnp.tests.third_party.cupy import testing + + +class TestArrayContiguity(unittest.TestCase): + + def test_is_contiguous(self): + a = testing.shaped_arange((2, 3, 4)) + assert a.flags.c_contiguous is True + b = a.transpose(2, 0, 1) + assert b.flags.c_contiguous is False + c = a[::-1] + assert c.flags.c_contiguous is False + d = a[:, :, ::2] + assert d.flags.c_contiguous is False diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_conversion.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_conversion.py index b6a1a4227198..90961602eba1 100644 --- 
a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_conversion.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_conversion.py @@ -13,6 +13,7 @@ {"shape": (1, 1, 1)}, ) class TestNdarrayItem(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_item(self, xp, dtype): @@ -26,6 +27,7 @@ def test_item(self, xp, dtype): {"shape": (1, 0, 1)}, ) class TestNdarrayItemRaise(unittest.TestCase): + def test_item(self): for xp in (numpy, cupy): a = testing.shaped_arange(self.shape, xp, xp.float32) @@ -40,7 +42,9 @@ def test_item(self): {"shape": (2, 3), "order": "C"}, {"shape": (2, 3), "order": "F"}, ) +@pytest.mark.skip("tobytes() method is not supported yet") class TestNdarrayToBytes(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_item(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py index 0b738acfd97a..515243d89c5c 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_copy_and_view.py @@ -27,6 +27,7 @@ def get_strides(xp, a): @pytest.mark.skip("'dpnp_array' object has no attribute 'view' yet") class TestView: + @testing.numpy_cupy_array_equal() def test_view(self, xp): a = testing.shaped_arange((4,), xp, dtype=numpy.float32) @@ -188,6 +189,7 @@ def test_view_larger_dtype_zero_sized(self, xp): class TestArrayCopy: + @testing.for_orders("CF") @testing.for_dtypes( [numpy.int16, numpy.int64, numpy.float16, numpy.float64] @@ -199,23 +201,25 @@ def test_isinstance_numpy_copy(self, xp, dtype, order): b[:] = a return b - @pytest.mark.skip("Doesn't raise ValueError in numpy") + @pytest.mark.skip("copy from host to device is allowed") def test_isinstance_numpy_copy_wrong_dtype(self): - a = numpy.arange(100, dtype=numpy.float32).reshape(10, 10) + a = numpy.arange(100, 
dtype=cupy.default_float_type()).reshape(10, 10) b = cupy.empty(a.shape, dtype=numpy.int32) with pytest.raises(ValueError): b[:] = a def test_isinstance_numpy_copy_wrong_shape(self): for xp in (numpy, cupy): - a = numpy.arange(100, dtype=numpy.float32).reshape(10, 10) + a = numpy.arange(100, dtype=cupy.default_float_type()).reshape( + 10, 10 + ) b = cupy.empty(100, dtype=a.dtype) with pytest.raises(ValueError): b[:] = a @testing.numpy_cupy_array_equal() def test_isinstance_numpy_copy_not_slice(self, xp): - a = xp.arange(5, dtype=numpy.float32) + a = xp.arange(5, dtype=cupy.default_float_type()) a[a < 3] = 0 return a @@ -228,6 +232,7 @@ def test_copy_host_to_device_view(self): class TestArrayFlatten: + @testing.numpy_cupy_array_equal() def test_flatten(self, xp): a = testing.shaped_arange((2, 3, 4), xp) @@ -245,13 +250,13 @@ def test_flatten_transposed(self, xp): a = testing.shaped_arange((2, 3, 4), xp).transpose(2, 0, 1) return a.flatten() - @testing.for_orders("CFAK") + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_flatten_order(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp) return a.flatten(order) - @testing.for_orders("CFAK") + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_flatten_order_copied(self, xp, order): a = testing.shaped_arange((4,), xp) @@ -259,7 +264,7 @@ def test_flatten_order_copied(self, xp, order): a[:] = 1 return b - @testing.for_orders("CFAK") + @testing.for_orders("CFA") @testing.numpy_cupy_array_equal() def test_flatten_order_transposed(self, xp, order): a = testing.shaped_arange((2, 3, 4), xp).transpose(2, 0, 1) @@ -267,6 +272,7 @@ def test_flatten_order_transposed(self, xp, order): class TestArrayFill: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_fill(self, xp, dtype): @@ -274,7 +280,10 @@ def test_fill(self, xp, dtype): a.fill(1) return a - @pytest.mark.skip("Numpy allows Numpy scalar arrays as fill value") + @pytest.mark.skip("NumPy array as input is not 
supported") + @testing.with_requires("numpy>=1.24.0") + @testing.for_all_dtypes_combination(("dtype1", "dtype2")) + @testing.numpy_cupy_array_equal(accept_error=ComplexWarning) def test_fill_with_numpy_scalar_ndarray(self, xp, dtype1, dtype2): a = testing.shaped_arange((2, 3, 4), xp, dtype1) a.fill(numpy.ones((), dtype=dtype2)) @@ -289,9 +298,7 @@ def test_fill_with_cupy_scalar_ndarray(self, xp, dtype1, dtype2): a.fill(b) return a - @pytest.mark.skip( - "it's allowed to broadcast dpnp array while filling, no exception then" - ) + @pytest.mark.skip("NumPy array as input is not supported") @testing.for_all_dtypes() def test_fill_with_nonscalar_ndarray(self, dtype): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -309,6 +316,7 @@ def test_transposed_fill(self, xp, dtype): class TestArrayAsType: + @testing.for_orders(["C", "F", "A", "K", None]) @testing.for_all_dtypes_combination(("src_dtype", "dst_dtype")) @testing.numpy_cupy_array_equal() @@ -388,6 +396,7 @@ def test_astype_boolean_view(self, xp): class TestArrayDiagonal: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_diagonal1(self, xp, dtype): @@ -420,6 +429,7 @@ def test_isinstance_numpy_view_copy_f(self, xp, dtype, order): class C_cp(cupy.ndarray): + def __new__(cls, *args, info=None, **kwargs): obj = super().__new__(cls, *args, **kwargs) obj.info = info @@ -432,6 +442,7 @@ def __array_finalize__(self, obj): class C_np(numpy.ndarray): + def __new__(cls, *args, info=None, **kwargs): obj = super().__new__(cls, *args, **kwargs) obj.info = info @@ -445,18 +456,19 @@ def __array_finalize__(self, obj): @pytest.mark.skip("'dpnp_array' object has no attribute 'view' yet") class TestSubclassArrayView: + def test_view_casting(self): for xp, C in [(numpy, C_np), (cupy, C_cp)]: - a = xp.arange(5, dtype="i").view("F") + a = xp.arange(5, dtype="i").view("f") assert type(a) is xp.ndarray assert a.dtype == xp.float32 - a = xp.arange(5, dtype="i").view(dtype="F") + a = xp.arange(5, 
dtype="i").view(dtype="f") assert type(a) is xp.ndarray assert a.dtype == xp.float32 with pytest.raises(TypeError): - xp.arange(5, dtype="i").view("F", dtype="F") + xp.arange(5, dtype="i").view("f", dtype="f") a = xp.arange(5, dtype="i").view(C) assert type(a) is C @@ -476,7 +488,7 @@ def test_view_casting(self): assert a.info is None with pytest.raises(TypeError): - xp.arange(5).view("F", C, type=C) + xp.arange(5).view("f", C, type=C) with pytest.raises(ValueError): cupy.arange(5).view(type=numpy.ndarray) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_cuda_array_interface.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_cuda_array_interface.py new file mode 100644 index 000000000000..7dbe085c8643 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_cuda_array_interface.py @@ -0,0 +1,348 @@ +import unittest + +import pytest + +# from cupy_backends.cuda import stream as stream_module +import dpnp as cupy + +# from cupy import _core +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "__cuda_array_interface__ is not supported", allow_module_level=True +) + +# TODO(leofang): test PTDS in this file + + +class DummyObjectWithCudaArrayInterface(object): + + def __init__(self, a, ver=3): + self.a = a + self.ver = ver + + @property + def __cuda_array_interface__(self): + desc = { + "shape": self.a.shape, + "strides": self.a.strides, + "typestr": self.a.dtype.str, + "descr": self.a.dtype.descr, + "data": (self.a.data.ptr, False), + "version": self.ver, + } + if self.ver == 3: + stream = cupy.cuda.get_current_stream() + desc["stream"] = 1 if stream.ptr == 0 else stream.ptr + return desc + + +@testing.parameterize( + *testing.product( + { + "stream": ("null", "new"), + "ver": (2, 3), + } + ) +) +@testing.with_requires("numpy>=1.25") +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestArrayUfunc(unittest.TestCase): + + def setUp(self): + if self.stream == "null": + 
self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose( + rtol=1e-6, accept_error=TypeError, contiguous_check=False + ) + def check_array_scalar_op(self, op, xp, x_type, y_type, trans=False): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + if trans: + a = a.T + + if xp is cupy: + with self.stream: + a = DummyObjectWithCudaArrayInterface(a, self.ver) + return getattr(xp, op)(a, y_type(3)) + else: + return getattr(xp, op)(a, y_type(3)) + + def test_add_scalar(self): + self.check_array_scalar_op("add") + + def test_add_scalar_with_strides(self): + self.check_array_scalar_op("add", trans=True) + + +@testing.parameterize( + *testing.product( + { + "stream": ("null", "new"), + "ver": (2, 3), + } + ) +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestElementwiseKernel(unittest.TestCase): + + def setUp(self): + if self.stream == "null": + self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + + @testing.for_all_dtypes_combination() + @testing.numpy_cupy_allclose( + rtol=1e-6, accept_error=TypeError, contiguous_check=False + ) + def check_array_scalar_op(self, op, xp, dtyes, trans=False): + a = xp.array([[1, 2, 3], [4, 5, 6]], dtyes) + if trans: + a = a.T + + if xp is cupy: + with self.stream: + a = DummyObjectWithCudaArrayInterface(a, self.ver) + f = cupy.ElementwiseKernel("T x, T y", "T z", "z = x + y") + return f(a, dtyes(3)) + else: + return a + dtyes(3) + + def test_add_scalar(self): + self.check_array_scalar_op("add") + + def test_add_scalar_with_strides(self): + self.check_array_scalar_op("add", trans=True) + + +@testing.parameterize( + *testing.product( + { + "stream": ("null", "new"), + "ver": (2, 3), + } + ) +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class 
TestSimpleReductionFunction(unittest.TestCase): + + def setUp(self): + if self.stream == "null": + self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + + self.my_int8_sum = _core.create_reduction_func( + "my_sum", ("b->b",), ("in0", "a + b", "out0 = a", None) + ) + + @testing.numpy_cupy_allclose() + def check_int8_sum(self, shape, xp, axis=None, keepdims=False, trans=False): + a = testing.shaped_random(shape, xp, "b") + if trans: + a = a.T + + if xp == cupy: + with self.stream: + a = DummyObjectWithCudaArrayInterface(a, self.ver) + return self.my_int8_sum(a, axis=axis, keepdims=keepdims) + else: + return a.sum(axis=axis, keepdims=keepdims, dtype="b") + + def test_shape(self): + self.check_int8_sum((2**10,)) + + def test_shape_with_strides(self): + self.check_int8_sum((2**10, 16), trans=True) + + +@testing.parameterize( + *testing.product( + { + "stream": ("null", "new"), + "ver": (2, 3), + } + ) +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestReductionKernel(unittest.TestCase): + + def setUp(self): + if self.stream == "null": + self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + + self.my_sum = _core.ReductionKernel( + "T x", "T out", "x", "a + b", "out = a", "0", "my_sum" + ) + + @testing.numpy_cupy_allclose() + def check_int8_sum(self, shape, xp, axis=None, keepdims=False, trans=False): + a = testing.shaped_random(shape, xp, "b") + if trans: + a = a.T + + if xp == cupy: + with self.stream: + a = DummyObjectWithCudaArrayInterface(a, self.ver) + return self.my_sum(a, axis=axis, keepdims=keepdims) + else: + return a.sum(axis=axis, keepdims=keepdims, dtype="b") + + def test_shape(self): + self.check_int8_sum((2**10,)) + + def test_shape_with_strides(self): + self.check_int8_sum((2**10, 16), trans=True) + + +@testing.parameterize( + {"shape": (10,), "slices": (slice(0, None),)}, + {"shape": (10,), "slices": (slice(2, 
None),)}, + {"shape": (10, 10), "slices": (slice(0, None), slice(0, None))}, + {"shape": (10, 10), "slices": (slice(0, None), slice(2, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(0, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(2, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(4, None))}, +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestSlicingMemoryPointer(unittest.TestCase): + + @testing.for_all_dtypes_combination(names=["dtype"]) + @testing.for_orders("CF") + def test_shape_with_strides(self, dtype, order): + x = cupy.zeros(self.shape, dtype=dtype, order=order) + + start = [s.start for s in self.slices] + itemsize = cupy.dtype(dtype).itemsize + dimsize = [s * itemsize for s in start] + if len(self.shape) == 1: + offset = start[0] * itemsize + else: + if order == "C": + offset = self.shape[0] * dimsize[0] + dimsize[1] + else: + offset = self.shape[0] * dimsize[1] + dimsize[0] + + cai_ptr, _ = x.__cuda_array_interface__["data"] + slice_cai_ptr, _ = x[self.slices].__cuda_array_interface__["data"] + cupy_data_ptr = x.data.ptr + sliced_cupy_data_ptr = x[self.slices].data.ptr + + assert cai_ptr == cupy_data_ptr + assert slice_cai_ptr == sliced_cupy_data_ptr + assert slice_cai_ptr == cai_ptr + offset + + +test_cases = [ + {"shape": (10,), "slices": (slice(0, None),)}, + {"shape": (10,), "slices": (slice(2, None),)}, + {"shape": (10, 10), "slices": (slice(0, None), slice(0, None))}, + {"shape": (10, 10), "slices": (slice(0, None), slice(2, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(0, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(2, None))}, + {"shape": (10, 10), "slices": (slice(2, None), slice(4, None))}, +] +test_streams = ("null", "new") +test_cases_with_stream = [ + {"stream": s, **t} for t in test_cases for s in test_streams +] + + +@testing.parameterize(*test_cases_with_stream) +@pytest.mark.skipif( + 
cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestCUDAArrayInterfaceCompliance(unittest.TestCase): + + def setUp(self): + if self.stream == "null": + self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + + @testing.for_all_dtypes_combination(names=["dtype"]) + @testing.for_orders("CF") + def test_value_type(self, dtype, order): + x = cupy.zeros(self.shape, dtype=dtype, order=order) + y = x[self.slices] + + # mandatory entries + with self.stream: + CAI = y.__cuda_array_interface__ + shape = CAI["shape"] + typestr = CAI["typestr"] + ptr, readonly = CAI["data"] + version = CAI["version"] + strides = CAI["strides"] + + # optional entries + descr = CAI["descr"] if "descr" in CAI else None + stream = CAI["stream"] if "stream" in CAI else None + + # Don't validate correctness of data here, just their types + assert version == 3 # bump this when the protocol is updated! + assert isinstance(CAI, dict) + assert isinstance(shape, tuple) + assert isinstance(typestr, str) + assert isinstance(ptr, int) + assert isinstance(readonly, bool) + assert (strides is None) or isinstance(strides, tuple) + assert (descr is None) or isinstance(descr, list) + if isinstance(descr, list): + for item in descr: + assert isinstance(item, tuple) + assert (stream is None) or isinstance(stream, int) + + +@testing.parameterize( + *testing.product( + { + "stream": ("null", "new", "ptds"), + } + ) +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestCUDAArrayInterfaceStream(unittest.TestCase): + def setUp(self): + if self.stream == "null": + self.stream = cupy.cuda.Stream.null + elif self.stream == "new": + self.stream = cupy.cuda.Stream() + elif self.stream == "ptds": + self.stream = cupy.cuda.Stream.ptds + + def test_stream_export(self): + a = cupy.empty(100) + + # the stream context should export the stream + with self.stream: + stream_ptr = a.__cuda_array_interface__["stream"] + 
+ if self.stream is cupy.cuda.Stream.null: + assert stream_ptr == stream_module.get_default_stream_ptr() + elif self.stream is cupy.cuda.Stream.ptds: + assert stream_ptr == 2 + else: + assert stream_ptr == self.stream.ptr + + # without a stream context, it's always the default stream + stream_ptr = a.__cuda_array_interface__["stream"] + assert stream_ptr == stream_module.get_default_stream_ptr() diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_elementwise_op.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_elementwise_op.py new file mode 100644 index 000000000000..e240f73ddb4d --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_elementwise_op.py @@ -0,0 +1,821 @@ +import operator + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("operator interface is not supported", allow_module_level=True) + + +class TestArrayElementwiseOp: + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError) + def check_array_scalar_op( + self, + op, + xp, + x_type, + y_type, + swap=False, + no_bool=False, + no_complex=False, + ): + x_dtype = numpy.dtype(x_type) + y_dtype = numpy.dtype(y_type) + if no_bool and x_dtype == "?" 
and y_dtype == "?": + return xp.array(True) + if no_complex and (x_dtype.kind == "c" or y_dtype.kind == "c"): + return xp.array(True) + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + if swap: + return op(y_type(3), a) + else: + return op(a, y_type(3)) + + @testing.with_requires("numpy>=1.25") + def test_add_scalar(self): + self.check_array_scalar_op(operator.add) + + @testing.with_requires("numpy>=1.25") + def test_radd_scalar(self): + self.check_array_scalar_op(operator.add, swap=True) + + def test_iadd_scalar(self): + self.check_array_scalar_op(operator.iadd) + + @testing.with_requires("numpy>=1.25") + def test_sub_scalar(self): + self.check_array_scalar_op(operator.sub, no_bool=True) + + @testing.with_requires("numpy>=1.25") + def test_rsub_scalar(self): + self.check_array_scalar_op(operator.sub, swap=True, no_bool=True) + + def test_isub_scalar(self): + self.check_array_scalar_op(operator.isub, no_bool=True) + + @testing.with_requires("numpy>=1.25") + def test_mul_scalar(self): + self.check_array_scalar_op(operator.mul) + + @testing.with_requires("numpy>=1.25") + def test_rmul_scalar(self): + self.check_array_scalar_op(operator.mul, swap=True) + + def test_imul_scalar(self): + self.check_array_scalar_op(operator.imul) + + @testing.with_requires("numpy>=1.25") + def test_truediv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op(operator.truediv) + + @testing.with_requires("numpy>=1.25") + def test_rtruediv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op(operator.truediv, swap=True) + + def test_itruediv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op(operator.itruediv) + + def test_floordiv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op(operator.floordiv, no_complex=True) + + def test_rfloordiv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op( + operator.floordiv, swap=True, no_complex=True + 
) + + def test_ifloordiv_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op(operator.ifloordiv, no_complex=True) + + @testing.with_requires("numpy>=1.25") + def test_pow_scalar(self): + self.check_array_scalar_op(operator.pow) + + @testing.with_requires("numpy>=1.25") + def test_rpow_scalar(self): + self.check_array_scalar_op(operator.pow, swap=True) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError) + def check_ipow_scalar(self, xp, x_type, y_type): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + return operator.ipow(a, y_type(3)) + + def test_ipow_scalar(self): + self.check_ipow_scalar() + + def test_divmod0_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op( + lambda x, y: divmod(x, y)[0], no_complex=True + ) + + def test_divmod1_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op( + lambda x, y: divmod(x, y)[1], no_complex=True + ) + + def test_rdivmod0_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op( + lambda x, y: divmod(x, y)[0], swap=True, no_complex=True + ) + + def test_rdivmod1_scalar(self): + with numpy.errstate(divide="ignore"): + self.check_array_scalar_op( + lambda x, y: divmod(x, y)[1], swap=True, no_complex=True + ) + + def test_lt_scalar(self): + self.check_array_scalar_op(operator.lt, no_complex=False) + + def test_le_scalar(self): + self.check_array_scalar_op(operator.le, no_complex=False) + + def test_gt_scalar(self): + self.check_array_scalar_op(operator.gt, no_complex=False) + + def test_ge_scalar(self): + self.check_array_scalar_op(operator.ge, no_complex=False) + + def test_eq_scalar(self): + self.check_array_scalar_op(operator.eq) + + def test_ne_scalar(self): + self.check_array_scalar_op(operator.ne) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def 
check_array_array_op( + self, op, xp, x_type, y_type, no_bool=False, no_complex=False + ): + x_dtype = numpy.dtype(x_type) + y_dtype = numpy.dtype(y_type) + if no_bool and x_dtype == "?" and y_dtype == "?": + return xp.array(True) + if no_complex and (x_dtype.kind == "c" or y_dtype.kind == "c"): + return xp.array(True) + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[6, 5, 4], [3, 2, 1]], y_type) + return op(a, b) + + def test_add_array(self): + self.check_array_array_op(operator.add) + + def test_iadd_array(self): + self.check_array_array_op(operator.iadd) + + def test_sub_array(self): + self.check_array_array_op(operator.sub, no_bool=True) + + def test_isub_array(self): + self.check_array_array_op(operator.isub, no_bool=True) + + def test_mul_array(self): + self.check_array_array_op(operator.mul) + + def test_imul_array(self): + self.check_array_array_op(operator.imul) + + def test_truediv_array(self): + with numpy.errstate(divide="ignore"): + self.check_array_array_op(operator.truediv) + + def test_itruediv_array(self): + with numpy.errstate(divide="ignore"): + self.check_array_array_op(operator.itruediv) + + def test_floordiv_array(self): + with numpy.errstate(divide="ignore"): + self.check_array_array_op(operator.floordiv, no_complex=True) + + def test_ifloordiv_array(self): + if "1.16.1" <= numpy.lib.NumpyVersion(numpy.__version__) < "1.18.0": + self.skipTest("NumPy Issue #12927") + with numpy.errstate(divide="ignore"): + self.check_array_array_op(operator.ifloordiv, no_complex=True) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError) + def check_pow_array(self, xp, x_type, y_type): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[6, 5, 4], [3, 2, 1]], y_type) + return operator.pow(a, b) + + def test_pow_array(self): + # There are some precision issues in HIP that prevent + # checking with atol=0 + if cupy.cuda.runtime.is_hip: + 
self.check_pow_array() + else: + self.check_array_array_op(operator.pow) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError) + def check_ipow_array(self, xp, x_type, y_type): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[6, 5, 4], [3, 2, 1]], y_type) + return operator.ipow(a, b) + + def test_ipow_array(self): + self.check_ipow_array() + + def test_divmod0_array(self): + with numpy.errstate(divide="ignore"): + self.check_array_array_op(lambda x, y: divmod(x, y)[0]) + + def test_divmod1_array(self): + with numpy.errstate(divide="ignore"): + self.check_array_array_op(lambda x, y: divmod(x, y)[1]) + + def test_lt_array(self): + self.check_array_array_op(operator.lt, no_complex=True) + + def test_le_array(self): + self.check_array_array_op(operator.le, no_complex=True) + + def test_gt_array(self): + self.check_array_array_op(operator.gt, no_complex=True) + + def test_ge_array(self): + self.check_array_array_op(operator.ge, no_complex=True) + + def test_eq_array(self): + self.check_array_array_op(operator.eq) + + def test_ne_array(self): + self.check_array_array_op(operator.ne) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_broadcasted_op( + self, op, xp, x_type, y_type, no_bool=False, no_complex=False + ): + x_dtype = numpy.dtype(x_type) + y_dtype = numpy.dtype(y_type) + if no_bool and x_dtype == "?" 
and y_dtype == "?": + return xp.array(True) + if no_complex and (x_dtype.kind == "c" or y_dtype.kind == "c"): + return xp.array(True) + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[1], [2]], y_type) + return op(a, b) + + def test_broadcasted_add(self): + self.check_array_broadcasted_op(operator.add) + + def test_broadcasted_iadd(self): + self.check_array_broadcasted_op(operator.iadd) + + def test_broadcasted_sub(self): + # TODO(unno): sub for boolean array is deprecated in numpy>=1.13 + self.check_array_broadcasted_op(operator.sub, no_bool=True) + + def test_broadcasted_isub(self): + # TODO(unno): sub for boolean array is deprecated in numpy>=1.13 + self.check_array_broadcasted_op(operator.isub, no_bool=True) + + def test_broadcasted_mul(self): + self.check_array_broadcasted_op(operator.mul) + + def test_broadcasted_imul(self): + self.check_array_broadcasted_op(operator.imul) + + def test_broadcasted_truediv(self): + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op(operator.truediv) + + def test_broadcasted_itruediv(self): + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op(operator.itruediv) + + def test_broadcasted_floordiv(self): + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op(operator.floordiv, no_complex=True) + + def test_broadcasted_ifloordiv(self): + if "1.16.1" <= numpy.lib.NumpyVersion(numpy.__version__) < "1.18.0": + self.skipTest("NumPy Issue #12927") + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op(operator.ifloordiv, no_complex=True) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError) + def check_broadcasted_pow(self, xp, x_type, y_type): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[1], [2]], y_type) + return operator.pow(a, b) + + def test_broadcasted_pow(self): + # There are some precision issues in HIP that prevent + # checking 
with atol=0 + if cupy.cuda.runtime.is_hip: + self.check_broadcasted_pow() + else: + self.check_array_broadcasted_op(operator.pow) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError) + def check_broadcasted_ipow(self, xp, x_type, y_type): + a = xp.array([[1, 2, 3], [4, 5, 6]], x_type) + b = xp.array([[1], [2]], y_type) + return operator.ipow(a, b) + + def test_broadcasted_ipow(self): + self.check_broadcasted_ipow() + + def test_broadcasted_divmod0(self): + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op( + lambda x, y: divmod(x, y)[0], no_complex=True + ) + + def test_broadcasted_divmod1(self): + with numpy.errstate(divide="ignore"): + self.check_array_broadcasted_op( + lambda x, y: divmod(x, y)[1], no_complex=True + ) + + def test_broadcasted_lt(self): + self.check_array_broadcasted_op(operator.lt, no_complex=True) + + def test_broadcasted_le(self): + self.check_array_broadcasted_op(operator.le, no_complex=True) + + def test_broadcasted_gt(self): + self.check_array_broadcasted_op(operator.gt, no_complex=True) + + def test_broadcasted_ge(self): + self.check_array_broadcasted_op(operator.ge, no_complex=True) + + def test_broadcasted_eq(self): + self.check_array_broadcasted_op(operator.eq) + + def test_broadcasted_ne(self): + self.check_array_broadcasted_op(operator.ne) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(rtol=1e-6) + def check_array_doubly_broadcasted_op( + self, op, xp, x_type, y_type, no_bool=False, no_complex=False + ): + x_dtype = numpy.dtype(x_type) + y_dtype = numpy.dtype(y_type) + if no_bool and x_dtype == "?" 
and y_dtype == "?": + return xp.array(True) + if no_complex and (x_dtype.kind == "c" or y_dtype.kind == "c"): + return xp.array(True) + a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type) + b = xp.array([[1], [2], [3]], y_type) + return op(a, b) + + def test_doubly_broadcasted_add(self): + self.check_array_doubly_broadcasted_op(operator.add) + + def test_doubly_broadcasted_sub(self): + self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True) + + def test_doubly_broadcasted_mul(self): + self.check_array_doubly_broadcasted_op(operator.mul) + + def test_doubly_broadcasted_truediv(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_doubly_broadcasted_op(operator.truediv) + + def test_doubly_broadcasted_floordiv(self): + with numpy.errstate(divide="ignore"): + self.check_array_doubly_broadcasted_op( + operator.floordiv, no_complex=True + ) + + def test_doubly_broadcasted_pow(self): + self.check_array_doubly_broadcasted_op(operator.pow) + + def test_doubly_broadcasted_divmod0(self): + with numpy.errstate(divide="ignore"): + self.check_array_doubly_broadcasted_op( + lambda x, y: divmod(x, y)[0], no_complex=True + ) + + def test_doubly_broadcasted_divmod1(self): + with numpy.errstate(divide="ignore"): + self.check_array_doubly_broadcasted_op( + lambda x, y: divmod(x, y)[1], no_complex=True + ) + + def test_doubly_broadcasted_lt(self): + self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True) + + def test_doubly_broadcasted_le(self): + self.check_array_doubly_broadcasted_op(operator.le, no_complex=True) + + def test_doubly_broadcasted_gt(self): + self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True) + + def test_doubly_broadcasted_ge(self): + self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True) + + def test_doubly_broadcasted_eq(self): + self.check_array_doubly_broadcasted_op(operator.eq) + + def test_doubly_broadcasted_ne(self): + self.check_array_doubly_broadcasted_op(operator.ne) + + 
@testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose() + def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False): + if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_: + return xp.array(True) + a = xp.array([1, 2, 3, 4, 5], x_type) + b = xp.array([1, 2, 3, 4, 5], y_type) + return op(a, b[::-1]) + + def test_array_reversed_add(self): + self.check_array_reversed_op(operator.add) + + def test_array_reversed_sub(self): + self.check_array_reversed_op(operator.sub, no_bool=True) + + def test_array_reversed_mul(self): + self.check_array_reversed_op(operator.mul) + + @pytest.mark.parametrize( + "val", + [ + True, + False, + 0, + -127, + 255, + -32768, + 65535, + -2147483648, + 4294967295, + 0.0, + 100000.0, + ], + ) + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + ], + ) + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose(accept_error=OverflowError) + def test_typecast_(self, xp, op, dtype, val): + a = op(val, (testing.shaped_arange((5,), xp, dtype) - 2)) + return a + + @pytest.mark.parametrize( + "val", + [ + True, + False, + 0, + -127, + 255, + -32768, + 65535, + -2147483648, + 4294967295, + 0.0, + 100000.0, + ], + ) + @testing.for_all_dtypes(no_bool=True) + def test_typecast_2(self, dtype, val): + op = operator.truediv + with numpy.errstate(divide="ignore", invalid="ignore"): + a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2)) + b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2)) + assert a.dtype == b.dtype + + # Skip float16 because of NumPy #19514 + @testing.for_all_dtypes(name="x_type", no_float16=True) + @testing.numpy_cupy_allclose() + def check_array_boolarray_op(self, op, xp, x_type): + a = xp.array([[2, 7, 1], [8, 2, 8]], x_type) + # Cast from np.bool8 array should not read bytes + b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool) + return op(a, b) + + def test_add_array_boolarray(self): + 
self.check_array_boolarray_op(operator.add) + + def test_iadd_array_boolarray(self): + self.check_array_boolarray_op(operator.iadd) + + +class TestArrayIntElementwiseOp: + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False): + a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type) + if swap: + return op(y_type(2), a) + else: + return op(a, y_type(2)) + + def test_lshift_scalar(self): + self.check_array_scalar_op(operator.lshift) + + def test_rlshift_scalar(self): + self.check_array_scalar_op(operator.lshift, swap=True) + + def test_rshift_scalar(self): + self.check_array_scalar_op(operator.rshift) + + def test_rrshift_scalar(self): + self.check_array_scalar_op(operator.rshift, swap=True) + + def test_and_scalar(self): + self.check_array_scalar_op(operator.and_) + + def test_rand_scalar(self): + self.check_array_scalar_op(operator.and_, swap=True) + + def test_or_scalar(self): + self.check_array_scalar_op(operator.or_) + + def test_ror_scalar(self): + self.check_array_scalar_op(operator.or_, swap=True) + + def test_xor_scalar(self): + self.check_array_scalar_op(operator.xor) + + def test_rxor_scalar(self): + self.check_array_scalar_op(operator.xor, swap=True) + + def test_mod_scalar(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_scalar_op(operator.mod) + + def test_rmod_scalar(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_scalar_op(operator.mod, swap=True) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False): + a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type) + if swap: + return op(y_type(0), a) + else: + return op(a, y_type(0)) + + def test_lshift_scalarzero(self): + self.check_array_scalarzero_op(operator.lshift) + 
+ def test_rlshift_scalarzero(self): + self.check_array_scalarzero_op(operator.lshift, swap=True) + + def test_rshift_scalarzero(self): + self.check_array_scalarzero_op(operator.rshift) + + def test_rrshift_scalarzero(self): + self.check_array_scalarzero_op(operator.rshift, swap=True) + + def test_and_scalarzero(self): + self.check_array_scalarzero_op(operator.and_) + + def test_rand_scalarzero(self): + self.check_array_scalarzero_op(operator.and_, swap=True) + + def test_or_scalarzero(self): + self.check_array_scalarzero_op(operator.or_) + + def test_ror_scalarzero(self): + self.check_array_scalarzero_op(operator.or_, swap=True) + + def test_xor_scalarzero(self): + self.check_array_scalarzero_op(operator.xor) + + def test_rxor_scalarzero(self): + self.check_array_scalarzero_op(operator.xor, swap=True) + + def test_mod_scalarzero(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_scalarzero_op(operator.mod) + + def test_rmod_scalarzero(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_scalarzero_op(operator.mod, swap=True) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_array_op(self, op, xp, x_type, y_type): + a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type) + b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type) + return op(a, b) + + def test_lshift_array(self): + self.check_array_array_op(operator.lshift) + + def test_ilshift_array(self): + self.check_array_array_op(operator.ilshift) + + def test_rshift_array(self): + self.check_array_array_op(operator.rshift) + + def test_irshift_array(self): + self.check_array_array_op(operator.irshift) + + def test_and_array(self): + self.check_array_array_op(operator.and_) + + def test_iand_array(self): + self.check_array_array_op(operator.iand) + + def test_or_array(self): + self.check_array_array_op(operator.or_) + + def test_ior_array(self): + 
self.check_array_array_op(operator.ior) + + def test_xor_array(self): + self.check_array_array_op(operator.xor) + + def test_ixor_array(self): + self.check_array_array_op(operator.ixor) + + def test_mod_array(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_array_op(operator.mod) + + def test_imod_array(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_array_op(operator.imod) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + @testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_broadcasted_op(self, op, xp, x_type, y_type): + a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type) + b = xp.array([[0, 0, 1]], dtype=y_type) + return op(a, b) + + def test_broadcasted_lshift(self): + self.check_array_broadcasted_op(operator.lshift) + + def test_broadcasted_ilshift(self): + self.check_array_broadcasted_op(operator.ilshift) + + def test_broadcasted_rshift(self): + self.check_array_broadcasted_op(operator.rshift) + + def test_broadcasted_irshift(self): + self.check_array_broadcasted_op(operator.irshift) + + def test_broadcasted_and(self): + self.check_array_broadcasted_op(operator.and_) + + def test_broadcasted_iand(self): + self.check_array_broadcasted_op(operator.iand) + + def test_broadcasted_or(self): + self.check_array_broadcasted_op(operator.or_) + + def test_broadcasted_ior(self): + self.check_array_broadcasted_op(operator.ior) + + def test_broadcasted_xor(self): + self.check_array_broadcasted_op(operator.xor) + + def test_broadcasted_ixor(self): + self.check_array_broadcasted_op(operator.ixor) + + def test_broadcasted_mod(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_broadcasted_op(operator.mod) + + def test_broadcasted_imod(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_broadcasted_op(operator.imod) + + @testing.for_all_dtypes_combination(names=["x_type", "y_type"]) + 
@testing.numpy_cupy_allclose(accept_error=TypeError) + def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type): + a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type) + b = xp.array([[0], [0], [1]], dtype=y_type) + return op(a, b) + + def test_doubly_broadcasted_lshift(self): + self.check_array_doubly_broadcasted_op(operator.lshift) + + def test_doubly_broadcasted_rshift(self): + self.check_array_doubly_broadcasted_op(operator.rshift) + + def test_doubly_broadcasted_and(self): + self.check_array_doubly_broadcasted_op(operator.and_) + + def test_doubly_broadcasted_or(self): + self.check_array_doubly_broadcasted_op(operator.or_) + + def test_doubly_broadcasted_xor(self): + self.check_array_doubly_broadcasted_op(operator.xor) + + def test_doubly_broadcasted_mod(self): + with numpy.errstate(divide="ignore", invalid="ignore"): + self.check_array_doubly_broadcasted_op(operator.mod) + + +@pytest.mark.parametrize( + "value", + [ + None, + Ellipsis, + object(), + numpy._NoValue, + ], +) +class TestArrayObjectComparison: + + @pytest.mark.parametrize("swap", [False, True]) + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_eq_object(self, xp, dtype, value, swap): + a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + if swap: + return value == a + else: + return a == value + + @pytest.mark.parametrize("swap", [False, True]) + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_ne_object(self, xp, dtype, value, swap): + a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + if swap: + return value != a + else: + return a != value + + +class HasEq: + def __eq__(self, other): + return (other == 2) | (other == 4) + + +class HasNe: + def __ne__(self, other): + return (other == 2) | (other == 4) + + +class HasEqSub(HasEq): + pass + + +class CustomInt(int): + pass + + +@pytest.mark.parametrize("dtype", ["int32", "float64"]) +@pytest.mark.parametrize( + "value", + [ + HasEq(), + HasNe(), # eq test passes because `==` does not 
fall back to `__ne__`. + HasEqSub(), + CustomInt(3), + ], +) +class TestArrayObjectComparisonDifficult: + + # OK to raise TypeError. + # If CuPy returns a result, it should match with NumPy's result. + + def test_eq_object(self, dtype, value): + expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value + + a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + try: + res = a == value + except TypeError: + pytest.skip() + + cupy.testing.assert_array_equal(res, expected) + + def test_ne_object(self, dtype, value): + expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value + + a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + try: + res = a != value + except TypeError: + pytest.skip() + + cupy.testing.assert_array_equal(res, expected) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_get.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_get.py new file mode 100644 index 000000000000..53b7b2cf110a --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_get.py @@ -0,0 +1,166 @@ +import unittest + +import numpy +import pytest +from numpy import testing as np_testing + +import dpnp as cupy + +# from cupy import cuda +from dpnp.tests.third_party.cupy import testing + +pytest.skip("get() method is not supported", allow_module_level=True) + + +class TestArrayGet(unittest.TestCase): + + def setUp(self): + self.stream = cuda.Stream.null + + def check_get(self, f, stream, order="C", blocking=True): + a_gpu = f(cupy) + a_cpu = a_gpu.get(stream, order=order, blocking=blocking) + if stream: + stream.synchronize() + b_cpu = f(numpy) + np_testing.assert_array_equal(a_cpu, b_cpu) + if order == "F" or (order == "A" and a_gpu.flags.f_contiguous): + assert a_cpu.flags.f_contiguous + else: + assert a_cpu.flags.c_contiguous + + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_contiguous_array(self, dtype, order): + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + 
self.check_get(contiguous_array, None, order) + + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_non_contiguous_array(self, dtype, order): + def non_contiguous_array(xp): + return testing.shaped_arange((3, 3), xp, dtype, order)[0::2, 0::2] + + self.check_get(non_contiguous_array, None, order) + + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_contiguous_array_stream(self, dtype, order): + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + self.check_get(contiguous_array, self.stream, order) + + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_contiguous_array_stream_nonblocking(self, dtype, order): + # Note: This is just a smoking gun test, the real test is done for + # testing cupy.asnumpy(), which under the hood calls .get(). + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + self.check_get(contiguous_array, self.stream, order, False) + + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_non_contiguous_array_stream(self, dtype, order): + def non_contiguous_array(xp): + return testing.shaped_arange((3, 3), xp, dtype, order)[0::2, 0::2] + + self.check_get(non_contiguous_array, self.stream) + + @testing.multi_gpu(2) + @testing.for_orders("CFA") + @testing.for_all_dtypes() + def test_get_multigpu(self, dtype, order): + with cuda.Device(1): + src = testing.shaped_arange((2, 3), cupy, dtype, order) + src = cupy.asfortranarray(src) + with cuda.Device(0): + dst = src.get() + expected = testing.shaped_arange((2, 3), numpy, dtype, order) + np_testing.assert_array_equal(dst, expected) + + +class TestArrayGetWithOut(unittest.TestCase): + + def setUp(self): + self.stream = cuda.Stream.null + + def check_get(self, f, out, stream): + a_gpu = f(cupy) + a_cpu = a_gpu.get(stream, out=out) + if stream: + stream.synchronize() + b_cpu = f(numpy) + assert a_cpu is out + np_testing.assert_array_equal(a_cpu, b_cpu) + + @testing.for_orders("CF") + 
@testing.for_all_dtypes() + def test_contiguous_array(self, dtype, order): + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + out = numpy.empty((3,), dtype, order) + self.check_get(contiguous_array, out, None) + + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_contiguous_array_cross(self, dtype, order): + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + out_order = "C" if order == "F" else "F" + out = numpy.empty((3,), dtype, out_order) + self.check_get(contiguous_array, out, None) + + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_contiguous_array_with_error(self, dtype, order): + out = numpy.empty((3, 3), dtype)[0:2, 0:2] + with self.assertRaises(RuntimeError): + a_gpu = testing.shaped_arange((3, 3), cupy, dtype, order)[0:2, 0:2] + a_gpu.get(out=out) + + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_non_contiguous_array(self, dtype, order): + def non_contiguous_array(xp): + return testing.shaped_arange((3, 3), xp, dtype, order)[0::2, 0::2] + + out = numpy.empty((2, 2), dtype, order) + self.check_get(non_contiguous_array, out, None) + + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_contiguous_array_stream(self, dtype, order): + def contiguous_array(xp): + return testing.shaped_arange((3,), xp, dtype, order) + + out = numpy.empty((3,), dtype, order) + self.check_get(contiguous_array, out, self.stream) + + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_non_contiguous_array_stream(self, dtype, order): + def non_contiguous_array(xp): + return testing.shaped_arange((3, 3), xp, dtype, order)[0::2, 0::2] + + out = numpy.empty((2, 2), dtype, order) + self.check_get(non_contiguous_array, out, self.stream) + + @testing.multi_gpu(2) + @testing.for_orders("CF") + @testing.for_all_dtypes() + def test_get_multigpu(self, dtype, order): + with cuda.Device(1): + src = testing.shaped_arange((2, 3), cupy, dtype, order) + 
src = cupy.asfortranarray(src) + with cuda.Device(0): + dst = numpy.empty((2, 3), dtype, order) + src.get(out=dst) + expected = testing.shaped_arange((2, 3), numpy, dtype, order) + np_testing.assert_array_equal(dst, expected) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_indexing.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_indexing.py new file mode 100644 index 000000000000..059345092b69 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_indexing.py @@ -0,0 +1,263 @@ +import unittest +import warnings + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +if numpy.lib.NumpyVersion(numpy.__version__) >= "2.0.0b1": + from numpy.exceptions import ComplexWarning +else: + from numpy import ComplexWarning + + +@testing.parameterize( + {"shape": (2, 3, 4), "transpose": None, "indexes": (1, 0, 2)}, + {"shape": (2, 3, 4), "transpose": None, "indexes": (-1, 0, -2)}, + {"shape": (2, 3, 4), "transpose": (2, 0, 1), "indexes": (1, 0, 2)}, + {"shape": (2, 3, 4), "transpose": (2, 0, 1), "indexes": (-1, 0, -2)}, + { + "shape": (2, 3, 4), + "transpose": None, + "indexes": (slice(None), slice(None, 1), slice(2)), + }, + { + "shape": (2, 3, 4), + "transpose": None, + "indexes": (slice(None), slice(None, -1), slice(-2)), + }, + { + "shape": (2, 3, 4), + "transpose": (2, 0, 1), + "indexes": (slice(None), slice(None, 1), slice(2)), + }, + { + "shape": (2, 3, 5), + "transpose": None, + "indexes": (slice(None, None, -1), slice(1, None, -1), slice(4, 1, -2)), + }, + { + "shape": (2, 3, 5), + "transpose": (2, 0, 1), + "indexes": (slice(4, 1, -2), slice(None, None, -1), slice(1, None, -1)), + }, + {"shape": (2, 3, 4), "transpose": None, "indexes": (Ellipsis, 2)}, + {"shape": (2, 3, 4), "transpose": None, "indexes": (1, Ellipsis)}, + {"shape": (2, 3, 4, 5), "transpose": None, "indexes": (1, Ellipsis, 3)}, + { + "shape": (2, 3, 4), + "transpose": None, + "indexes": (1, None, slice(2), None, 
2), + }, + {"shape": (2, 3), "transpose": None, "indexes": (None,)}, + { + "shape": (2,), + "transpose": None, + "indexes": ( + slice( + None, + ), + None, + ), + }, + {"shape": (), "transpose": None, "indexes": (None,)}, + {"shape": (), "transpose": None, "indexes": (None, None)}, + {"shape": (10,), "transpose": None, "indexes": (slice(10, -9, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-9, -10, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-1, -10, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-1, -11, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-11, -11, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(10, -9, -3),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-1, -11, -3),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(1, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(0, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-1, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-4, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-6, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-10, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-11, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-12, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, 1, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, 0, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -1, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -4, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -5, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -6, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -10, -1),)}, + {"shape": (10,), "transpose": None, "indexes": 
(slice(-5, -11, -1),)}, + {"shape": (10,), "transpose": None, "indexes": (slice(-5, -12, -1),)}, + # reversing indexing on empty dimension + {"shape": (0,), "transpose": None, "indexes": (slice(None, None, -1),)}, + { + "shape": (0, 0), + "transpose": None, + "indexes": (slice(None, None, -1), slice(None, None, -1)), + }, + { + "shape": (0, 0), + "transpose": None, + "indexes": (None, slice(None, None, -1)), + }, + { + "shape": (0, 0), + "transpose": None, + "indexes": (slice(None, None, -1), None), + }, + { + "shape": (0, 1), + "transpose": None, + "indexes": (slice(None, None, -1), None), + }, + { + "shape": (1, 0), + "transpose": None, + "indexes": (None, slice(None, None, -1)), + }, + { + "shape": (1, 0, 1), + "transpose": None, + "indexes": (None, slice(None, None, -1), None), + }, + # + { + "shape": (2, 0), + "transpose": None, + "indexes": (1, slice(None, None, None)), + }, +) +class TestArrayIndexingParameterized(unittest.TestCase): + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_getitem(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + if self.transpose: + a = a.transpose(self.transpose) + return a[self.indexes] + + +@testing.parameterize( + {"shape": (), "transpose": None, "indexes": 0}, + # Do not test which class will be raised: + # - too many indices (IndexError) + # - slice step cannot be zero (ValueError) + # {'shape': (), 'transpose': None, 'indexes': (slice(0, 1, 0),)}, + {"shape": (2, 3), "transpose": None, "indexes": (0, 0, 0)}, + {"shape": (2, 3, 4), "transpose": None, "indexes": -3}, + {"shape": (2, 3, 4), "transpose": (2, 0, 1), "indexes": -5}, + {"shape": (2, 3, 4), "transpose": None, "indexes": 3}, + {"shape": (2, 3, 4), "transpose": (2, 0, 1), "indexes": 5}, + {"shape": (2, 3, 4), "transpose": None, "indexes": (Ellipsis, Ellipsis, 1)}, +) +class TestArrayIndexIndexError(unittest.TestCase): + + @testing.for_all_dtypes() + def test_invalid_getitem(self, dtype): + for xp in (numpy, cupy): + a = 
testing.shaped_arange(self.shape, xp, dtype) + if self.transpose: + a = a.transpose(self.transpose) + with pytest.raises(IndexError): + a[self.indexes] + + +@testing.parameterize( + {"error_class": ValueError, "indexes": (slice(0, 1, 0),)}, + {"error_class": TypeError, "indexes": (slice((0, 0), None, None),)}, + {"error_class": TypeError, "indexes": (slice(None, (0, 0), None),)}, + {"error_class": TypeError, "indexes": (slice(None, None, (0, 0)),)}, +) +class TestArrayIndexOtherError(unittest.TestCase): + + @testing.for_all_dtypes() + def test_invalid_getitem(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((2, 3, 4), xp, dtype) + with pytest.raises(self.error_class): + a[self.indexes] + + +class TestArrayIndex(unittest.TestCase): + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setitem_constant(self, xp, dtype): + a = xp.zeros((2, 3, 4), dtype=dtype) + a[:] = 1 + return a + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setitem_partial_constant(self, xp, dtype): + a = xp.zeros((2, 3, 4), dtype=dtype) + a[1, 1:3] = 1 + return a + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setitem_copy(self, xp, dtype): + a = xp.zeros((2, 3, 4), dtype=dtype) + b = testing.shaped_arange((2, 3, 4), xp, dtype) + a[:] = b + return a + + @testing.for_all_dtypes_combination(("src_type", "dst_type")) + @testing.numpy_cupy_array_equal() + def test_setitem_different_type(self, xp, src_type, dst_type): + a = xp.zeros((2, 3, 4), dtype=dst_type) + b = testing.shaped_arange((2, 3, 4), xp, src_type) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ComplexWarning) + a[:] = b + return a + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setitem_partial_copy(self, xp, dtype): + a = xp.zeros((2, 3, 4), dtype=dtype) + b = testing.shaped_arange((3, 2), xp, dtype) + a[1, ::-1, 1:4:2] = b + return a + + @testing.numpy_cupy_array_equal() + def 
test_T(self, xp): + a = testing.shaped_arange((2, 3, 4), xp) + return a.T + + @testing.numpy_cupy_array_equal() + def test_T_vector(self, xp): + a = testing.shaped_arange((4,), xp) + return a.T + + +class TestSetItemCompatBroadcast: + @testing.numpy_cupy_array_equal() + def test_simple(self, xp): + dtype = int + a = xp.zeros(4, dtype=dtype) + a[:] = testing.shaped_arange((1, 4), xp, dtype) + return a + + @testing.numpy_cupy_array_equal() + def test_other1(self, xp): + dtype = int + a = xp.zeros((2, 1, 3), dtype=dtype) + a[:] = testing.shaped_arange((1, 2, 1, 3), xp, dtype) + return a + + @testing.numpy_cupy_array_equal() + def test_0d(self, xp): + dtype = int + a = xp.zeros((), dtype=dtype) + a[...] = testing.shaped_arange((1, 1), xp, dtype) + return a + + @testing.numpy_cupy_array_equal() + def test_remain0d(self, xp): + dtype = int + a = xp.zeros((2, 3, 4), dtype=dtype) + a[0, 1, 2] = testing.shaped_arange((), xp, dtype) + return a diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_math.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_math.py index 7e98abfc5f21..11a6826c99a7 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_math.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_math.py @@ -3,6 +3,7 @@ import numpy import pytest +import dpnp as cupy from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing @@ -15,6 +16,7 @@ ) ) class TestRound(unittest.TestCase): + shape = (20,) @testing.for_all_dtypes() @@ -33,8 +35,9 @@ def test_round(self, xp, dtype): @testing.numpy_cupy_allclose(atol=1e-5) def test_round_out(self, xp): - dtype = "d" if has_support_aspect64() else "f" - a = testing.shaped_random(self.shape, xp, scale=100, dtype=dtype) + a = testing.shaped_random( + self.shape, xp, scale=100, dtype=cupy.default_float_type() + ) out = xp.empty_like(a) a.round(self.decimals, out) return out @@ -51,6 +54,7 @@ def test_round_out(self, xp): ) ) class 
TestRoundHalfway(unittest.TestCase): + shape = (20,) @testing.for_float_dtypes() @@ -112,6 +116,7 @@ def test_round_halfway_uint(self, xp, dtype): @testing.parameterize(*testing.product({"decimals": [-5, -4, -3, -2, -1, 0]})) class TestRoundMinMax(unittest.TestCase): + @unittest.skip("Known incompatibility: see core.pyx") @testing.numpy_cupy_array_equal() def _test_round_int64(self, xp): diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_owndata.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_owndata.py new file mode 100644 index 000000000000..dee220ab01fa --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_owndata.py @@ -0,0 +1,24 @@ +import unittest + +import pytest + +# from cupy import _core + +pytest.skip("owndata attribute is not supported", allow_module_level=True) + + +class TestArrayOwndata(unittest.TestCase): + + def setUp(self): + self.a = _core.ndarray(()) + + def test_original_array(self): + assert self.a.flags.owndata is True + + def test_view_array(self): + v = self.a.view() + assert v.flags.owndata is False + + def test_reshaped_array(self): + r = self.a.reshape(()) + assert r.flags.owndata is False diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_reduction.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_reduction.py index 802d38354d77..80b3f92fefa6 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_reduction.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_reduction.py @@ -1,11 +1,7 @@ -import unittest - import numpy import pytest import dpnp as cupy - -# from cupy.core import _accelerator from dpnp.tests.third_party.cupy import testing @@ -16,7 +12,36 @@ } ) ) -class TestArrayReduction(unittest.TestCase): +class TestArrayReduction: + + @pytest.fixture(scope="class") + def exclude_cutensor(self): + pass + + # # cuTENSOR seems to have issues in handling inf/nan in reduction-based + # # routines, so we use this fixture to skip testing it + # 
self.old_routine_accelerators = _acc.get_routine_accelerators() + # self.old_reduction_accelerators = _acc.get_reduction_accelerators() + + # rot_acc = self.old_routine_accelerators.copy() + # try: + # rot_acc.remove(_acc.ACCELERATOR_CUTENSOR) + # except ValueError: + # pass + # _acc.set_routine_accelerators(rot_acc) + + # red_acc = self.old_reduction_accelerators.copy() + # try: + # red_acc.remove(_acc.ACCELERATOR_CUTENSOR) + # except ValueError: + # pass + # _acc.set_reduction_accelerators(red_acc) + + # yield + + # _acc.set_routine_accelerators(self.old_routine_accelerators) + # _acc.set_reduction_accelerators(self.old_reduction_accelerators) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_max_all(self, xp, dtype): @@ -67,7 +92,7 @@ def test_max_multiple_axes_keepdims(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) - def test_max_nan(self, xp, dtype): + def test_max_nan(self, xp, dtype, exclude_cutensor): a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.max() @@ -85,6 +110,13 @@ def test_max_nan_imag(self, xp, dtype): ) return a.max() + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_max_inf(self, exclude_cutensor, xp, dtype): + # cupy/cupy#8180 + a = xp.array([-float("inf"), -float("inf")], dtype, order=self.order) + return a.max() + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) def test_min_all(self, xp, dtype): @@ -135,7 +167,7 @@ def test_min_multiple_axes_keepdims(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) - def test_min_nan(self, xp, dtype): + def test_min_nan(self, xp, dtype, exclude_cutensor): a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.min() @@ -153,77 +185,12 @@ def test_min_nan_imag(self, xp, dtype): ) return a.min() - # skip bool: numpy's ptp raises a TypeError on bool inputs - 
@testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_all(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype, order=self.order) - return xp.ptp(a) - - @testing.with_requires("numpy>=1.15") - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_all_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype, order=self.order) - return xp.ptp(a, keepdims=True) - - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_axis_large(self, xp, dtype): - a = testing.shaped_random((3, 1000), xp, dtype, order=self.order) - return xp.ptp(a, axis=0) - - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_axis0(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) - return xp.ptp(a, axis=0) - - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_axis1(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) - return xp.ptp(a, axis=1) - - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_axis2(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) - return xp.ptp(a, axis=2) - - @testing.with_requires("numpy>=1.15") - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_multiple_axes(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) - return xp.ptp(a, axis=(1, 2)) - - @testing.with_requires("numpy>=1.15") - @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_multiple_axes_keepdims(self, xp, dtype): - a = testing.shaped_random((2, 3, 4), xp, dtype, order=self.order) - return xp.ptp(a, axis=(1, 
2), keepdims=True) - @testing.for_float_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_nan(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype, order=self.order) - return xp.ptp(a) - - @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_nan_real(self, xp, dtype): - a = xp.array([float("nan"), 1, -1], dtype, order=self.order) - return xp.ptp(a) - - @testing.for_complex_dtypes() - @testing.numpy_cupy_allclose(contiguous_check=False) - def test_ptp_nan_imag(self, xp, dtype): - a = xp.array( - [float("nan") * 1.0j, 1.0j, -1.0j], dtype, order=self.order - ) - return xp.ptp(a) + def test_min_inf(self, xp, dtype, exclude_cutensor): + # cupy/cupy#8180 + a = xp.array([float("inf"), float("inf")], dtype, order=self.order) + return a.min() @testing.for_all_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) @@ -257,7 +224,7 @@ def test_argmax_axis2(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) - def test_argmax_nan(self, xp, dtype): + def test_argmax_nan(self, xp, dtype, exclude_cutensor): a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.argmax() @@ -307,7 +274,7 @@ def test_argmin_axis2(self, xp, dtype): @testing.for_float_dtypes() @testing.numpy_cupy_allclose(contiguous_check=False) - def test_argmin_nan(self, xp, dtype): + def test_argmin_nan(self, xp, dtype, exclude_cutensor): a = xp.array([float("nan"), 1, -1], dtype, order=self.order) return a.argmin() @@ -362,11 +329,12 @@ def test_argmin_nan_imag(self, xp, dtype): ((2, 3, 0), (0, 1, 2)), ], "order": ("C", "F"), - "func": ("min", "max", "argmin", "argmax"), + "func": ("min", "max", "argmax", "argmin"), } ) ) class TestArrayReductionZeroSize: + @testing.numpy_cupy_allclose( contiguous_check=False, accept_error=ValueError ) @@ -385,68 +353,138 @@ def test_zero_size(self, xp): return getattr(a, self.func)(axis=axis) -# This class compares CUB results 
against NumPy's +# This class compares CUB results against NumPy's. ("fallback" is CuPy's +# original kernel, also tested here to reduce code duplication.) @testing.parameterize( *testing.product( { - "shape": [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], + "shape": [ + (10,), + (10, 20), + (10, 20, 30), + (10, 20, 30, 40), + # skip (2, 3, 0) because it would not hit the CUB code path + (0,), + (2, 0), + (0, 2), + (0, 2, 3), + (2, 3, 0), + ], "order": ("C", "F"), + "backend": ("device", "block", "fallback"), } ) ) -# @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled') -class TestCubReduction(unittest.TestCase): - # def setUp(self): - # self.old_accelerators = _accelerator.get_routine_accelerators() - # _accelerator.set_routine_accelerators(['cub']) - # - # def tearDown(self): - # _accelerator.set_routine_accelerators(self.old_accelerators) - - # @testing.for_contiguous_axes() - @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose(rtol=1e-5) +@pytest.mark.skip("CUB reduction is not supported") +class TestCubReduction: + + @pytest.fixture(autouse=True) + def setUp(self): + self.old_routine_accelerators = _acc.get_routine_accelerators() + self.old_reduction_accelerators = _acc.get_reduction_accelerators() + if self.backend == "device": + _acc.set_routine_accelerators(["cub"]) + _acc.set_reduction_accelerators([]) + elif self.backend == "block": + _acc.set_routine_accelerators([]) + _acc.set_reduction_accelerators(["cub"]) + elif self.backend == "fallback": + _acc.set_routine_accelerators([]) + _acc.set_reduction_accelerators([]) + yield + _acc.set_routine_accelerators(self.old_routine_accelerators) + _acc.set_reduction_accelerators(self.old_reduction_accelerators) + + @testing.for_contiguous_axes() + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose( + contiguous_check=False, accept_error=ValueError + ) def test_cub_min(self, xp, dtype, axis): - a = testing.shaped_random(self.shape, 
xp, dtype) - if self.order in ("c", "C"): - a = xp.ascontiguousarray(a) - elif self.order in ("f", "F"): - a = xp.asfortranarray(a) + a = testing.shaped_random(self.shape, xp, dtype, order=self.order) if xp is numpy: return a.min(axis=axis) # xp is cupy, first ensure we really use CUB ret = cupy.empty(()) # Cython checks return type, need to fool it - if len(axis) == len(self.shape): - func = "cupy.core._routines_statistics.cub.device_reduce" - else: - func = "cupy.core._routines_statistics.cub.device_segmented_reduce" - with testing.AssertFunctionIsCalled(func, return_value=ret): - a.min(axis=axis) + if self.backend == "device": + func_name = "cupy._core._routines_statistics.cub." + if len(axis) == len(self.shape): + func_name += "device_reduce" + else: + func_name += "device_segmented_reduce" + with testing.AssertFunctionIsCalled(func_name, return_value=ret): + a.min(axis=axis) + elif self.backend == "block": + # this is the only function we can mock; the rest is cdef'd + func_name = "cupy._core._cub_reduction." 
+ func_name += "_SimpleCubReductionKernel_get_cached_function" + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + if len(axis) == len(self.shape): + times_called = 2 # two passes + else: + times_called = 1 # one pass + if a.size == 0: + times_called = 0 # _reduction.pyx has an early return path + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=times_called + ): + a.min(axis=axis) + elif self.backend == "fallback": + pass # ...then perform the actual computation return a.min(axis=axis) - # @testing.for_contiguous_axes() @testing.for_all_dtypes(no_bool=True, no_float16=True) - @testing.numpy_cupy_allclose(rtol=1e-5) + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_cub_min_empty_axis(self, xp, dtype, contiguous_check=False): + a = testing.shaped_random(self.shape, xp, dtype, order=self.order) + return a.min(axis=()) + + @testing.for_contiguous_axes() + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose( + contiguous_check=False, accept_error=ValueError + ) def test_cub_max(self, xp, dtype, axis): - a = testing.shaped_random(self.shape, xp, dtype) - if self.order in ("c", "C"): - a = xp.ascontiguousarray(a) - elif self.order in ("f", "F"): - a = xp.asfortranarray(a) + a = testing.shaped_random(self.shape, xp, dtype, order=self.order) if xp is numpy: return a.max(axis=axis) # xp is cupy, first ensure we really use CUB ret = cupy.empty(()) # Cython checks return type, need to fool it - if len(axis) == len(self.shape): - func = "cupy.core._routines_statistics.cub.device_reduce" - else: - func = "cupy.core._routines_statistics.cub.device_segmented_reduce" - with testing.AssertFunctionIsCalled(func, return_value=ret): - a.max(axis=axis) + if self.backend == "device": + func_name = "cupy._core._routines_statistics.cub." 
+ if len(axis) == len(self.shape): + func_name += "device_reduce" + else: + func_name += "device_segmented_reduce" + with testing.AssertFunctionIsCalled(func_name, return_value=ret): + a.max(axis=axis) + elif self.backend == "block": + # this is the only function we can mock; the rest is cdef'd + func_name = "cupy._core._cub_reduction." + func_name += "_SimpleCubReductionKernel_get_cached_function" + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + if len(axis) == len(self.shape): + times_called = 2 # two passes + else: + times_called = 1 # one pass + if a.size == 0: + times_called = 0 # _reduction.pyx has an early return path + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=times_called + ): + a.max(axis=axis) + elif self.backend == "fallback": + pass # ...then perform the actual computation return a.max(axis=axis) + + @testing.for_all_dtypes(no_bool=True, no_float16=True) + @testing.numpy_cupy_allclose(contiguous_check=False) + def test_cub_max_empty_axis(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype, order=self.order) + return a.max(axis=()) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_scatter.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_scatter.py new file mode 100644 index 000000000000..7bfda0e0ce54 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_scatter.py @@ -0,0 +1,436 @@ +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("scatter is not supported", allow_module_level=True) + + +@testing.parameterize( + # array only + {"shape": (2, 3, 4), "slices": numpy.array(-1), "value": 1}, + {"shape": (2, 3, 4), "slices": numpy.array([1, 0]), "value": 1}, + {"shape": (2, 3, 4), "slices": (slice(None), [1, 2]), "value": 1}, + { + "shape": (3, 4, 5), + "slices": ( + slice(None), + [[1, 2], [0, -1]], + ), + "value": 1, + }, + { + "shape": (3, 4, 5), + "slices": (slice(None), 
slice(None), [[1, 2], [0, 3]]), + "value": 1, + }, + # array with duplicate indices + {"shape": (2, 3), "slices": ([1, 1], slice(None)), "value": 1}, + {"shape": (2, 3), "slices": ([1, 0, 1], slice(None)), "value": 1}, + {"shape": (2, 3), "slices": (slice(1, 2), [1, 0, 1]), "value": 1}, + # slice and array + { + "shape": (3, 4, 5), + "slices": (slice(None), slice(1, 2), [[1, 3], [0, 2]]), + "value": 1, + }, + # None and array + {"shape": (3, 4, 5), "slices": (None, [1, -1]), "value": 1}, + {"shape": (3, 4, 5), "slices": (None, [1, -1], None), "value": 1}, + {"shape": (3, 4, 5), "slices": (None, None, None, [1, -1]), "value": 1}, + # None, slice and array + {"shape": (3, 4, 5), "slices": (slice(0, 1), None, [1, -1]), "value": 1}, + { + "shape": (3, 4, 5), + "slices": (slice(0, 1), slice(1, 2), [1, -1]), + "value": 1, + }, + { + "shape": (3, 4, 5), + "slices": (slice(0, 1), None, slice(1, 2), [1, -1]), + "value": 1, + }, + # broadcasting + { + "shape": (3, 4, 5), + "slices": ( + slice(None), + [[1, 2], [0, -1]], + ), + "value": numpy.arange(3 * 2 * 2 * 5).reshape(3, 2, 2, 5), + }, + # multiple integer arrays + { + "shape": (2, 3, 4), + "slices": ([1, 0], [2, 1]), + "value": numpy.arange(2 * 4).reshape(2, 4), + }, + { + "shape": (2, 3, 4), + "slices": ([1, 0], slice(None), [2, 1]), + "value": numpy.arange(2 * 3).reshape(2, 3), + }, + { + "shape": (2, 3, 4), + "slices": ([1, 0], slice(None), [[2, 0], [3, 1]]), + "value": numpy.arange(2 * 2 * 3).reshape(2, 2, 3), + }, + { + "shape": (1, 1, 2, 3, 4), + "slices": (None, slice(None), 0, [1, 0], slice(0, 2, 2), [2, -1]), + "value": 1, + }, + # multiple integer arrays duplicate + { + "shape": (2, 3, 4), + "slices": ([1, 1], [1, 1]), + "value": numpy.arange(2 * 4).reshape(2, 4), + }, + { + "shape": (2, 3, 4), + "slices": ([1, 1], slice(None), [[2, 2], [3, 1]]), + "value": numpy.arange(2 * 2 * 3).reshape(2, 2, 3), + }, + { + "shape": (2, 3, 4), + "slices": ([1, 1], 1, [[2, 2], [3, 1]]), + "value": numpy.arange(2 * 
2).reshape(2, 2), + }, + # mask + { + "shape": (3, 4, 5), + "slices": (numpy.random.choice([False, True], (3, 4, 5)),), + "value": 1, + }, + { + "shape": (3, 4, 5), + "slices": (numpy.random.choice([False, True], (3,)),), + "value": numpy.arange(4 * 5).reshape(4, 5), + }, + { + "shape": (3, 4, 5), + "slices": ( + slice(None), + numpy.array([True, False, False, True]), + ), + "value": numpy.arange(3 * 2 * 5).reshape(3, 2, 5), + }, + # empty arrays + {"shape": (2, 3, 4), "slices": [], "value": 1}, + {"shape": (2, 3, 4), "slices": [], "value": numpy.array([1, 1, 1, 1])}, + { + "shape": (2, 3, 4), + "slices": [], + "value": numpy.random.uniform(size=(3, 4)), + }, + { + "shape": (2, 3, 4), + "slices": numpy.array([], dtype=numpy.int32), + "value": 1, + }, + {"shape": (2, 3, 4), "slices": ([],), "value": 1}, + { + "shape": (2, 3, 4), + "slices": numpy.array([[]], dtype=numpy.int32), + "value": numpy.random.uniform(size=(3, 4)), + }, + {"shape": (2, 3, 4), "slices": ([[]],), "value": 1}, + {"shape": (2, 3, 4), "slices": ([[[]]],), "value": 1}, + {"shape": (2, 3, 4, 5), "slices": ([[[]]],), "value": 1}, + {"shape": (2, 3, 4, 5), "slices": ([[[[]]]],), "value": 1}, + {"shape": (2, 3, 4), "slices": (slice(None), []), "value": 1}, + {"shape": (2, 3, 4), "slices": ([], []), "value": 1}, + { + "shape": (2, 3, 4), + "slices": numpy.array([], dtype=numpy.bool_), + "value": 1, + }, + { + "shape": (2, 3, 4), + "slices": (slice(None), numpy.array([], dtype=numpy.bool_)), + "value": 1, + }, + { + "shape": (2, 3, 4), + "slices": numpy.array([[], []], dtype=numpy.bool_), + "value": numpy.random.uniform(size=(4,)), + }, + # list indexes + {"shape": (2, 3, 4), "slices": [1], "value": 1}, + { + "shape": (2, 3, 4), + "slices": [1, 1], + "value": numpy.arange(2 * 3 * 4).reshape(2, 3, 4), + }, + {"shape": (2, 3, 4), "slices": ([1],), "value": 1}, + {"shape": (2, 3, 4), "slices": ([1, 1],), "value": 1}, + {"shape": (2, 3, 4), "slices": ([1], [1]), "value": 1}, + {"shape": (2, 3, 4), "slices": 
([1, 1], 1), "value": 1}, + {"shape": (2, 3, 4), "slices": ([1], slice(1, 2)), "value": 1}, + {"shape": (2, 3, 4), "slices": ([[1]], slice(1, 2)), "value": 1}, + _ids=False, # Do not generate ids from randomly generated params +) +class TestScatterParametrized: + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float16, + numpy.float64, + ] + ) + @testing.numpy_cupy_array_equal() + def test_scatter_add(self, xp, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + a = xp.zeros(self.shape, dtype) + xp.add.at(a, self.slices, self.value) + return a + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float64, + ] + ) + @testing.numpy_cupy_array_equal() + def test_scatter_max(self, xp, dtype): + a = xp.zeros(self.shape, dtype) + xp.maximum.at(a, self.slices, self.value) + return a + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float64, + ] + ) + @testing.numpy_cupy_array_equal() + def test_scatter_min(self, xp, dtype): + a = xp.zeros(self.shape, dtype) + xp.minimum.at(a, self.slices, self.value) + return a + + +class TestScatterAdd: + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float16, + numpy.float64, + ] + ) + def test_scatter_add_cupy_arguments(self, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (2, 3) + a = cupy.zeros(shape, dtype) + slices = (cupy.array([1, 1]), slice(None)) + cupy.add.at(a, slices, cupy.array(1.0)) + testing.assert_array_equal( + a, cupy.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], dtype) + ) + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + 
numpy.float16, + numpy.float64, + ] + ) + def test_scatter_add_cupy_arguments_mask(self, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (2, 3) + a = cupy.zeros(shape, dtype) + slices = (cupy.array([True, False]), slice(None)) + cupy.add.at(a, slices, cupy.array(1.0)) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], dtype) + ) + + @testing.for_dtypes_combination( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float16, + numpy.float64, + ], + names=["src_dtype", "dst_dtype"], + ) + def test_scatter_add_differnt_dtypes(self, src_dtype, dst_dtype): + if cupy.cuda.runtime.is_hip and ( + src_dtype == numpy.float16 or dst_dtype == numpy.float16 + ): + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (2, 3) + a = cupy.zeros(shape, dtype=src_dtype) + value = cupy.array(1, dtype=dst_dtype) + slices = ([1, 1], slice(None)) + cupy.add.at(a, slices, value) + + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[0, 0, 0], [2, 2, 2]], dtype=src_dtype) + ) + + @testing.for_dtypes_combination( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float16, + numpy.float64, + ], + names=["src_dtype", "dst_dtype"], + ) + def test_scatter_add_differnt_dtypes_mask(self, src_dtype, dst_dtype): + if cupy.cuda.runtime.is_hip and ( + src_dtype == numpy.float16 or dst_dtype == numpy.float16 + ): + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (2, 3) + a = cupy.zeros(shape, dtype=src_dtype) + value = cupy.array(1, dtype=dst_dtype) + slices = numpy.array([[True, False, False], [False, True, True]]) + cupy.add.at(a, slices, value) + + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[1, 0, 0], [0, 1, 1]], dtype=src_dtype) + ) + + +class TestScatterMinMax: + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + 
numpy.uint64, + numpy.ulonglong, + numpy.float64, + ] + ) + def test_scatter_minmax_cupy_arguments(self, dtype): + shape = (2, 3) + a = cupy.zeros(shape, dtype) + slices = (cupy.array([1, 1]), slice(None)) + cupy.maximum.at(a, slices, cupy.array(1.0)) + testing.assert_array_equal( + a, cupy.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], dtype) + ) + + a = cupy.ones(shape, dtype) + cupy.minimum.at(a, slices, cupy.array(0.0)) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], dtype) + ) + + @testing.for_dtypes( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float64, + ] + ) + def test_scatter_minmax_cupy_arguments_mask(self, dtype): + shape = (2, 3) + a = cupy.zeros(shape, dtype) + slices = (cupy.array([True, False]), slice(None)) + cupy.maximum.at(a, slices, cupy.array(1.0)) + testing.assert_array_equal( + a, cupy.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], dtype) + ) + + a = cupy.ones(shape, dtype) + cupy.minimum.at(a, slices, cupy.array(0.0)) + testing.assert_array_equal( + a, cupy.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], dtype) + ) + + @testing.for_dtypes_combination( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + numpy.ulonglong, + numpy.float64, + ], + names=["src_dtype", "dst_dtype"], + ) + def test_scatter_minmax_differnt_dtypes(self, src_dtype, dst_dtype): + shape = (2, 3) + a = cupy.zeros(shape, dtype=src_dtype) + value = cupy.array(1, dtype=dst_dtype) + slices = ([1, 1], slice(None)) + cupy.maximum.at(a, slices, value) + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[0, 0, 0], [1, 1, 1]], dtype=src_dtype) + ) + + a = cupy.ones(shape, dtype=src_dtype) + value = cupy.array(0, dtype=dst_dtype) + cupy.minimum.at(a, slices, value) + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[1, 1, 1], [0, 0, 0]], dtype=src_dtype) + ) + + @testing.for_dtypes_combination( + [ + numpy.float32, + numpy.int32, + numpy.uint32, + numpy.uint64, + 
numpy.ulonglong, + numpy.float16, + numpy.float64, + ], + names=["src_dtype", "dst_dtype"], + ) + def test_scatter_minmax_differnt_dtypes_mask(self, src_dtype, dst_dtype): + shape = (2, 3) + a = cupy.zeros(shape, dtype=src_dtype) + value = cupy.array(1, dtype=dst_dtype) + slices = numpy.array([[True, False, False], [False, True, True]]) + cupy.maximum.at(a, slices, value) + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[1, 0, 0], [0, 1, 1]], dtype=src_dtype) + ) + + a = cupy.ones(shape, dtype=src_dtype) + value = cupy.array(0, dtype=dst_dtype) + cupy.minimum.at(a, slices, value) + numpy.testing.assert_almost_equal( + a.get(), numpy.array([[0, 1, 1], [1, 0, 0]], dtype=src_dtype) + ) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_ufunc.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_ufunc.py new file mode 100644 index 000000000000..0a6624acc59b --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_ufunc.py @@ -0,0 +1,278 @@ +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("UFunc interface is not supported", allow_module_level=True) + + +class C(cupy.ndarray): + + def __new__(cls, *args, info=None, **kwargs): + obj = super().__new__(cls, *args, **kwargs) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + return + self.info = getattr(obj, "info", None) + + +class TestArrayUfunc: + + @testing.for_all_dtypes() + def test_unary_op(self, dtype): + a = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + outa = numpy.sin(a) + # numpy operation produced a cupy array + assert isinstance(outa, cupy.ndarray) + b = a.get() + outb = numpy.sin(b) + assert numpy.allclose(outa.get(), outb) + + @testing.for_all_dtypes() + def test_unary_op_out(self, dtype): + a = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + b = a.get() + outb = numpy.sin(b) + # pre-make output with same type as input + outa = cupy.array(numpy.array([0, 
1, 2]), dtype=outb.dtype) + numpy.sin(a, out=outa) + assert numpy.allclose(outa.get(), outb) + + @testing.for_all_dtypes() + def test_binary_op(self, dtype): + a1 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + a2 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + outa = numpy.add(a1, a2) + # numpy operation produced a cupy array + assert isinstance(outa, cupy.ndarray) + b1 = a1.get() + b2 = a2.get() + outb = numpy.add(b1, b2) + assert numpy.allclose(outa.get(), outb) + + @testing.for_all_dtypes() + def test_binary_op_out(self, dtype): + a1 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + a2 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + outa = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + numpy.add(a1, a2, out=outa) + b1 = a1.get() + b2 = a2.get() + outb = numpy.add(b1, b2) + assert numpy.allclose(outa.get(), outb) + + @testing.for_all_dtypes() + def test_binary_mixed_op(self, dtype): + a1 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype) + a2 = cupy.array(numpy.array([0, 1, 2]), dtype=dtype).get() + with pytest.raises(TypeError): + # attempt to add cupy and numpy arrays + numpy.add(a1, a2) + with pytest.raises(TypeError): + # check reverse order + numpy.add(a2, a1) + with pytest.raises(TypeError): + # reject numpy output from cupy + numpy.add(a1, a1, out=a2) + with pytest.raises(TypeError): + # reject cupy output from numpy + numpy.add(a2, a2, out=a1) + with pytest.raises(ValueError): + # bad form for out= + # this is also an error with numpy array + numpy.sin(a1, out=()) + with pytest.raises(ValueError): + # bad form for out= + # this is also an error with numpy array + numpy.sin(a1, out=(a1, a1)) + + @testing.numpy_cupy_array_equal() + def test_indexing(self, xp): + a = cupy.testing.shaped_arange((3, 1), xp)[:, :, None] + b = cupy.testing.shaped_arange((3, 2), xp)[:, None, :] + return a * b + + @testing.numpy_cupy_array_equal() + def test_shares_memory(self, xp): + a = cupy.testing.shaped_arange((1000, 1000), xp, "int64") + b = xp.transpose(a) 
+ a += b + return a + + def test_subclass_unary_op(self): + a = cupy.array([0, 1, 2]).view(C) + a.info = 1 + outa = cupy.sin(a) + assert isinstance(outa, C) + assert outa.info is not None and outa.info == 1 + + b = a.get() + outb = numpy.sin(b) + testing.assert_allclose(outa, outb) + + def test_subclass_binary_op(self): + a0 = cupy.array([0, 1, 2]).view(C) + a0.info = 1 + a1 = cupy.array([3, 4, 5]).view(C) + a1.info = 2 + outa = cupy.add(a0, a1) + assert isinstance(outa, C) + # a0 is used to initialize outa.info + assert outa.info is not None and outa.info == 1 + + b0 = a0.get() + b1 = a1.get() + outb = numpy.add(b0, b1) + testing.assert_allclose(outa, outb) + + def test_subclass_binary_op_mixed(self): + a0 = cupy.array([0, 1, 2]) + a1 = cupy.array([3, 4, 5]).view(C) + a1.info = 1 + outa = cupy.add(a0, a1) + assert isinstance(outa, C) + # The first appearance of C's instance is used to initialize outa.info + assert outa.info is not None and outa.info == 1 + + b0 = a0.get() + b1 = a1.get() + outb = numpy.add(b0, b1) + testing.assert_allclose(outa, outb) + + @testing.numpy_cupy_array_equal() + def test_ufunc_outer(self, xp): + a = cupy.testing.shaped_arange((3, 4), xp) + b = cupy.testing.shaped_arange((5, 6), xp) + return numpy.add.outer(a, b) + + @testing.numpy_cupy_array_equal() + def test_ufunc_at(self, xp): + a = cupy.testing.shaped_arange((10,), xp) + b = cupy.testing.shaped_arange((5,), xp) + indices = xp.array([0, 3, 6, 7, 9]) + numpy.add.at(a, indices, b) + return a + + @testing.numpy_cupy_array_equal() + def test_ufunc_at_scalar(self, xp): + a = cupy.testing.shaped_arange((10,), xp) + b = 7 + indices = xp.array([0, 3, 6, 7, 9]) + numpy.add.at(a, indices, b) + return a + + @testing.numpy_cupy_array_equal() + def test_ufunc_reduce(self, xp): + a = cupy.testing.shaped_arange((10, 12), xp) + return numpy.add.reduce(a, axis=-1) + + @testing.numpy_cupy_array_equal() + def test_ufunc_accumulate(self, xp): + a = cupy.testing.shaped_arange((10, 12), xp) + return 
numpy.add.accumulate(a, axis=-1) + + @testing.numpy_cupy_array_equal() + def test_ufunc_reduceat(self, xp): + a = cupy.testing.shaped_arange((10, 12), xp) + indices = xp.array([0, 3, 6, 7, 9]) + return numpy.add.reduceat(a, indices, axis=-1) + + +class TestUfunc: + @pytest.mark.parametrize( + "ufunc", + [ + "add", + "sin", + ], + ) + @testing.numpy_cupy_equal() + def test_types(self, xp, ufunc): + types = getattr(xp, ufunc).types + if xp == numpy: + assert isinstance(types, list) + types = list( + dict.fromkeys( # remove dups: numpy/numpy#7897 + sig + for sig in types + # CuPy does not support the following dtypes: + # (c)longdouble, datetime, timedelta, and object. + if not any(t in sig for t in "GgMmO") + ) + ) + return types + + @testing.numpy_cupy_allclose() + def test_unary_out_tuple(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + out = xp.zeros((2, 3), dtype) + ret = xp.sin(a, out=(out,)) + assert ret is out + return ret + + @testing.numpy_cupy_allclose() + def test_unary_out_positional_none(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + return xp.sin(a, None) + + @testing.numpy_cupy_allclose() + def test_binary_out_tuple(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = xp.ones((2, 3), dtype) + out = xp.zeros((2, 3), dtype) + ret = xp.add(a, b, out=(out,)) + assert ret is out + return ret + + @testing.numpy_cupy_allclose() + def test_biary_out_positional_none(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = xp.ones((2, 3), dtype) + return xp.add(a, b, None) + + @testing.numpy_cupy_allclose() + def test_divmod_out_tuple(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = testing.shaped_reverse_arange((2, 3), xp, dtype) + out0 = xp.zeros((2, 3), dtype) + out1 = xp.zeros((2, 3), dtype) + ret = xp.divmod(a, b, out=(out0, out1)) + assert ret[0] is out0 + assert ret[1] is out1 + return ret + + 
@testing.numpy_cupy_allclose() + def test_divmod_out_positional_none(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = xp.ones((2, 3), dtype) + return xp.divmod(a, b, None, None) + + @testing.numpy_cupy_allclose() + def test_divmod_out_partial(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = testing.shaped_reverse_arange((2, 3), xp, dtype) + out0 = xp.zeros((2, 3), dtype) + ret = xp.divmod(a, b, out0) # out1 is None + assert ret[0] is out0 + return ret + + @testing.numpy_cupy_allclose() + def test_divmod_out_partial_tuple(self, xp): + dtype = xp.float64 + a = testing.shaped_arange((2, 3), xp, dtype) + b = testing.shaped_reverse_arange((2, 3), xp, dtype) + out1 = xp.zeros((2, 3), dtype) + ret = xp.divmod(a, b, out=(None, out1)) + assert ret[1] is out1 + return ret diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_unary_op.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_unary_op.py index 18b398262b98..3a1e30fc894e 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray_unary_op.py +++ b/dpnp/tests/third_party/cupy/core_tests/test_ndarray_unary_op.py @@ -40,6 +40,7 @@ def test_bool_two_elements(self, dtype): class TestArrayUnaryOp(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def check_array_op(self, op, xp, dtype): @@ -111,6 +112,7 @@ def test_abs_zerodim_full(self): class TestArrayIntUnaryOp(unittest.TestCase): + @testing.for_int_dtypes() @testing.numpy_cupy_allclose() def check_array_op(self, op, xp, dtype): @@ -141,6 +143,7 @@ def test_invert_zerodim(self): *testing.product({"xp": [numpy, cupy], "shape": [(3, 2), (), (3, 0, 2)]}) ) class TestBoolNeg(unittest.TestCase): + def test_bool_neg(self): xp = self.xp if xp is numpy and not testing.numpy_satisfies(">=1.13.0"): diff --git a/dpnp/tests/third_party/cupy/core_tests/test_nep50_examples.py b/dpnp/tests/third_party/cupy/core_tests/test_nep50_examples.py new file mode 
100644 index 000000000000..02390f7f373a --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_nep50_examples.py @@ -0,0 +1,70 @@ +import numpy +import pytest + +import dpnp as cp +from dpnp.tests.third_party.cupy import testing + +# TODO: remove once all dtype aliases added +cp.int8 = numpy.int8 +cp.uint8 = numpy.uint8 +cp.int16 = numpy.int16 + +# "example string" or +# ("example string", "xfail message") +examples = [ + "uint8(1) + 2", + "array([1], uint8) + int64(1)", + "array([1], uint8) + array(1, int64)", + "array([1.], float32) + float64(1.)", + "array([1.], float32) + array(1., float64)", + "array([1], uint8) + 1", + "array([1], uint8) + 200", + "array([100], uint8) + 200", + "array([1], uint8) + 300", + "uint8(1) + 300", + "uint8(100) + 200", + "float32(1) + 3e100", + "array([1.0], float32) + 1e-14 == 1.0", + "array([0.1], float32) == float64(0.1)", + "array(1.0, float32) + 1e-14 == 1.0", + "array([1.], float32) + 3", + "array([1.], float32) + int64(3)", + "3j + array(3, complex64)", + "float32(1) + 1j", + "int32(1) + 5j", + # additional examples from the NEP text + "int16(2) + 2", + "int16(4) + 4j", + "float32(5) + 5j", + "bool_(True) + 1", + "True + uint8(2)", + # not in the NEP + "1.0 + array([1, 2, 3], int8)", + "array([1], float32) + 1j", +] + + +@pytest.mark.filterwarnings("ignore::RuntimeWarning") +@testing.with_requires("numpy>=2.0") +@pytest.mark.parametrize("example", examples) +@testing.numpy_cupy_allclose(atol=1e-15, accept_error=OverflowError) +def test_nep50_examples(xp, example): + dct = { + "array": xp.array, + "uint8": xp.uint8, + "int64": xp.int64, + "float32": xp.float32, + "float64": xp.float64, + "int16": xp.int16, + "bool_": xp.bool_, + "int32": xp.int32, + "complex64": xp.complex64, + "int8": xp.int8, + } + + if isinstance(example, tuple): + example, mesg = example + pytest.xfail(mesg) + + result = eval(example, dct) + return result diff --git a/dpnp/tests/third_party/cupy/core_tests/test_raw.py 
b/dpnp/tests/third_party/cupy/core_tests/test_raw.py new file mode 100644 index 000000000000..4885e89801c9 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_raw.py @@ -0,0 +1,1492 @@ +import contextlib +import io +import os +import pickle +import re +import subprocess +import sys +import tempfile +import unittest +from unittest import mock + +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# from cupy import _util +# from cupy._core import _accelerator +# from cupy.cuda import compiler +# from cupy.cuda import memory +# from cupy_backends.cuda.libs import nvrtc + +pytest.skip("RawKernel is not supported", allow_module_level=True) + +_test_source1 = r""" +extern "C" __global__ +void test_sum(const float* x1, const float* x2, float* y, unsigned int N) { + int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + y[tid] = x1[tid] + x2[tid]; +} +""" + +_test_compile_src = r""" +extern "C" __global__ +void test_op(const float* x1, const float* x2, float* y, unsigned int N) { + int tid = blockDim.x * blockIdx.x + threadIdx.x; + int j; // To generate a warning to appear in the log stream + if (tid < N) + y[tid] = x1[tid] OP x2[tid]; +} +""" + +# test compiling and invoking multiple kernels in one single .cubin +_test_source2 = r""" +extern "C"{ + +__global__ void test_sum(const float* x1, const float* x2, float* y, \ + unsigned int N) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + { + y[tid] = x1[tid] + x2[tid]; + } +} + +__global__ void test_multiply(const float* x1, const float* x2, float* y, \ + unsigned int N) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + { + y[tid] = x1[tid] * x2[tid]; + } +} + +} +""" + +# test C macros +_test_source3 = r""" +#ifndef PRECISION + #define PRECISION 2 +#endif + +#if PRECISION == 2 + #define TYPE double +#elif PRECISION == 1 + #define TYPE float +#else + #error precision not supported +#endif + +extern "C"{ + 
+__global__ void test_sum(const TYPE* x1, const TYPE* x2, TYPE* y, \ + unsigned int N) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + { + y[tid] = x1[tid] + x2[tid]; + } +} + +__global__ void test_multiply(const TYPE* x1, const TYPE* x2, TYPE* y, \ + unsigned int N) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + { + y[tid] = x1[tid] * x2[tid]; + } +} + +} +""" + +# dynamic parallelism +_test_source4 = r""" +extern "C"{ + +__global__ void test_kernel_inner(float *arr, int N) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + + if (tid < N) + arr[tid] = 1.0; +} + +__global__ void test_kernel(float *arr, int N, int inner_blk) +{ + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + + if (tid < N/inner_blk) + test_kernel_inner<<<1, inner_blk>>>(arr+tid*inner_blk, inner_blk); +} + +} +""" + +# to generate cubin/ptx +_test_source5 = r""" +extern "C" __global__ +void test_div(const float* x1, const float* x2, float* y, unsigned int N) { + int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) + y[tid] = x1[tid] / (x2[tid] + 1.0); +} +""" + +_test_cuComplex = r""" +#include +#define N 100 + +extern "C"{ +/* ------------------- double complex ------------------- */ + +__global__ void test_add(cuDoubleComplex* arr1, cuDoubleComplex* arr2, + cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCadd(arr1[tid], arr2[tid]); + } +} + +__global__ void test_sub(cuDoubleComplex* arr1, cuDoubleComplex* arr2, + cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCsub(arr1[tid], arr2[tid]); + } +} + +__global__ void test_mul(cuDoubleComplex* arr1, cuDoubleComplex* arr2, + cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCmul(arr1[tid], arr2[tid]); + } +} + +__global__ void test_div(cuDoubleComplex* 
arr1, cuDoubleComplex* arr2, + cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCdiv(arr1[tid], arr2[tid]); + } +} + +__global__ void test_conj(cuDoubleComplex* arr, cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuConj(arr[tid]); + } +} + +__global__ void test_abs(cuDoubleComplex* arr, double* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCabs(arr[tid]); + } +} + +__global__ void test_fma(cuDoubleComplex* A, cuDoubleComplex* B, + cuDoubleComplex* C, cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCfma(A[tid], B[tid], C[tid]); + } +} + +__global__ void test_make(cuDoubleComplex* arr) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + cuDoubleComplex out = make_cuDoubleComplex(1.8, 2.9); + if (tid < N) { + arr[tid] = make_cuDoubleComplex(cuCreal(out), -3.*cuCimag(out)); + } +} + +__global__ void test_downcast(cuDoubleComplex* arr, cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuComplexDoubleToFloat(arr[tid]); + } +} + +__global__ void test_add_scalar(cuDoubleComplex* arr, cuDoubleComplex scalar, + cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCadd(arr[tid], scalar); + } +} + +/* ------------------- single complex ------------------- */ + +__global__ void test_addf(cuComplex* arr1, cuComplex* arr2, + cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCaddf(arr1[tid], arr2[tid]); + } +} + +__global__ void test_subf(cuComplex* arr1, cuComplex* arr2, + cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCsubf(arr1[tid], arr2[tid]); + } +} + +__global__ void 
test_mulf(cuComplex* arr1, cuComplex* arr2, + cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCmulf(arr1[tid], arr2[tid]); + } +} + +__global__ void test_divf(cuComplex* arr1, cuComplex* arr2, + cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCdivf(arr1[tid], arr2[tid]); + } +} + +__global__ void test_conjf(cuComplex* arr, cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuConjf(arr[tid]); + } +} + +__global__ void test_absf(cuFloatComplex* arr, float* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCabsf(arr[tid]); + } +} + +__global__ void test_fmaf(cuFloatComplex* A, cuFloatComplex* B, + cuFloatComplex* C, cuFloatComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCfmaf(A[tid], B[tid], C[tid]); + } +} + +__global__ void test_makef(cuComplex* arr) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + cuComplex out = make_cuFloatComplex(1.8, 2.9); + if (tid < N) { + arr[tid] = make_cuFloatComplex(cuCrealf(out), -3.*cuCimagf(out)); + } +} + +__global__ void test_upcast(cuComplex* arr, cuDoubleComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuComplexFloatToDouble(arr[tid]); + } +} + +__global__ void test_addf_scalar(cuComplex* arr, cuComplex scalar, + cuComplex* out) { + unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < N) { + out[tid] = cuCadd(arr[tid], scalar); + } +} + +} +""" + +test_const_mem = r""" +extern "C"{ +__constant__ float some_array[100]; + +__global__ void multiply_by_const(float* x, int N) { + int id = threadIdx.x + blockIdx.x * blockDim.x; + + if (id < N) { + x[id] *= some_array[id]; + } +} +} +""" + +test_cxx_template = r""" +#include + +template +__global__ void my_sqrt(T* 
input, int N) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + if (x < N) { + input[x] *= input[x]; + } +} + +__global__ void my_func(double* input, int N) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + if (x < N) { + input[x] *= input[x]; + } +} +""" + +test_cast = r""" +extern "C" __global__ void my_func(void* input, int N) { + double* arr = (double*)(input); + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + if (x < N) { + arr[x] = 3.0 * arr[x] - 8.0; + } +} +""" + + +@contextlib.contextmanager +def use_temporary_cache_dir(): + target1 = "cupy.cuda.compiler.get_cache_dir" + target2 = "cupy.cuda.compiler._empty_file_preprocess_cache" + temp_cache = {} + with tempfile.TemporaryDirectory() as path: + with mock.patch(target1, lambda: path): + with mock.patch(target2, temp_cache): + yield path + + +@contextlib.contextmanager +def compile_in_memory(in_memory): + target = "cupy.cuda.compiler._get_bool_env_variable" + + def new_target(name, default): + if name == "CUPY_CACHE_IN_MEMORY": + return in_memory + else: + # below is the source code of _get_bool_env_variable + val = os.environ.get(name) + if val is None or len(val) == 0: + return default + try: + return int(val) == 1 + except ValueError: + return False + + with mock.patch(target, new_target) as m: + yield m + + +def find_nvcc_ver(): + nvcc_ver_pattern = r"release (\d+\.\d+)" + cmd = cupy.cuda.get_nvcc_path().split() + cmd += ["--version"] + cache_ctx = use_temporary_cache_dir() + with cache_ctx as cache_path: + output = compiler._run_cc(cmd, cache_path, "nvcc") + match = re.search(nvcc_ver_pattern, output) + assert match + + # convert to driver ver format + major, minor = match.group(1).split(".") + return int(major) * 1000 + int(minor) * 10 + + +@testing.parameterize( + # First test NVRTC + {"backend": "nvrtc", "in_memory": False}, + # this run will read from in-memory cache + {"backend": "nvrtc", "in_memory": True}, + # this run will force recompilation + {"backend": 
"nvrtc", "in_memory": True, "clean_up": True}, + # Below is the same set of NVRTC tests, with Jitify turned on. For tests + # that can already pass, it shouldn't matter whether Jitify is on or not, + # and the only side effect is to add overhead. It doesn't make sense to + # test NVCC + Jitify. + {"backend": "nvrtc", "in_memory": False, "jitify": True}, + {"backend": "nvrtc", "in_memory": True, "jitify": True}, + {"backend": "nvrtc", "in_memory": True, "clean_up": True, "jitify": True}, + # Finally, we test NVCC + {"backend": "nvcc", "in_memory": False}, +) +class TestRaw(unittest.TestCase): + + _nvcc_ver = None + _nvrtc_ver = None + + def setUp(self): + if hasattr(self, "clean_up"): + if cupy.cuda.runtime.is_hip: + # Clearing memo triggers recompiling kernels using name + # expressions in other tests, e.g. dot and matmul, which + # hits a nvrtc bug. See #5843, #5945 and #6725. + self.skipTest("Clearing memo hits a nvrtc bug in other tests") + _util.clear_memo() + self.dev = cupy.cuda.runtime.getDevice() + assert self.dev != 1 + if not hasattr(self, "jitify"): + self.jitify = False + if cupy.cuda.runtime.is_hip and self.jitify: + self.skipTest("Jitify does not support ROCm/HIP") + + self.temporary_cache_dir_context = use_temporary_cache_dir() + self.in_memory_context = compile_in_memory(self.in_memory) + self.cache_dir = self.temporary_cache_dir_context.__enter__() + self.in_memory_context.__enter__() + + self.kern = cupy.RawKernel( + _test_source1, "test_sum", backend=self.backend, jitify=self.jitify + ) + self.mod2 = cupy.RawModule( + code=_test_source2, backend=self.backend, jitify=self.jitify + ) + self.mod3 = cupy.RawModule( + code=_test_source3, + options=("-DPRECISION=2",), + backend=self.backend, + jitify=self.jitify, + ) + + def tearDown(self): + if ( + self.in_memory + and _accelerator.ACCELERATOR_CUB + not in _accelerator.get_reduction_accelerators() + ): + # should not write any file to the cache dir, but the CUB reduction + # kernel uses nvcc, with 
which I/O cannot be avoided + files = os.listdir(self.cache_dir) + for f in files: + if f == "test_load_cubin.cu": + count = 1 + break + else: + count = 0 + assert len(files) == count + + self.in_memory_context.__exit__(*sys.exc_info()) + self.temporary_cache_dir_context.__exit__(*sys.exc_info()) + + def _helper(self, kernel, dtype): + N = 10 + x1 = cupy.arange(N**2, dtype=dtype).reshape(N, N) + x2 = cupy.ones((N, N), dtype=dtype) + y = cupy.zeros((N, N), dtype=dtype) + kernel((N,), (N,), (x1, x2, y, N**2)) + return x1, x2, y + + def test_basic(self): + x1, x2, y = self._helper(self.kern, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + def test_kernel_attributes(self): + attrs = self.kern.attributes + for attribute in [ + "binary_version", + "cache_mode_ca", + "const_size_bytes", + "local_size_bytes", + "max_dynamic_shared_size_bytes", + "max_threads_per_block", + "num_regs", + "preferred_shared_memory_carveout", + "ptx_version", + "shared_size_bytes", + ]: + assert attribute in attrs + # TODO(leofang): investigate why this fails on ROCm 3.5.0 + if not cupy.cuda.runtime.is_hip: + assert self.kern.num_regs > 0 + assert self.kern.max_threads_per_block > 0 + assert self.kern.shared_size_bytes == 0 + + def test_module(self): + module = self.mod2 + ker_sum = module.get_function("test_sum") + ker_times = module.get_function("test_multiply") + + x1, x2, y = self._helper(ker_sum, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + x1, x2, y = self._helper(ker_times, cupy.float32) + assert cupy.allclose(y, x1 * x2) + + def test_compiler_flag(self): + module = self.mod3 + ker_sum = module.get_function("test_sum") + ker_times = module.get_function("test_multiply") + + x1, x2, y = self._helper(ker_sum, cupy.float64) + assert cupy.allclose(y, x1 + x2) + + x1, x2, y = self._helper(ker_times, cupy.float64) + assert cupy.allclose(y, x1 * x2) + + def test_invalid_compiler_flag(self): + if cupy.cuda.runtime.is_hip and self.backend == "nvrtc": + self.skipTest("hiprtc does not 
handle #error macro properly") + + if self.jitify: + ex_type = cupy.cuda.compiler.JitifyException + else: + ex_type = cupy.cuda.compiler.CompileException + + with pytest.raises(ex_type) as ex: + mod = cupy.RawModule( + code=_test_source3, + options=("-DPRECISION=3",), + backend=self.backend, + jitify=self.jitify, + ) + mod.get_function("test_sum") # enforce compilation + + if not self.jitify: + assert "precision not supported" in str(ex.value) + + def _find_nvcc_ver(self): + if self._nvcc_ver: + return self._nvcc_ver + + self._nvcc_ver = find_nvcc_ver() + return self._nvcc_ver + + def _find_nvrtc_ver(self): + if self._nvrtc_ver: + return self._nvrtc_ver + + # convert to driver ver format + major, minor = nvrtc.getVersion() + self._nvrtc_ver = int(major) * 1000 + int(minor) * 10 + return self._nvrtc_ver + + def _check_ptx_loadable(self, compiler: str): + # if the PTX version is higher than the driver version, it won't + # be either JIT'able (CUDA_ERROR_UNSUPPORTED_PTX_VERSION) or loadable + # (CUDA_ERROR_NO_BINARY_FOR_GPU), see the table at + # https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#release-notes-ptx-release-history + if compiler == "nvrtc": + compiler_ver = self._find_nvrtc_ver() + elif compiler == "nvcc": + compiler_ver = self._find_nvcc_ver() + driver_ver = cupy.cuda.runtime.driverGetVersion() + if driver_ver < compiler_ver: + raise pytest.skip() + + def _generate_file(self, ext: str): + # generate cubin/ptx by calling nvcc/hipcc + + if not cupy.cuda.runtime.is_hip: + cc = cupy.cuda.get_nvcc_path() + arch = "-gencode=arch=compute_{CC},code=sm_{CC}".format( + CC=compiler._get_arch() + ) + code = _test_source5 + else: + # TODO(leofang): expose get_hipcc_path() to cupy.cuda? 
+ cc = cupy._environment.get_hipcc_path() + arch = "-v" # dummy + code = compiler._convert_to_hip_source(_test_source5, None, False) + # split() is needed because nvcc could come from the env var NVCC + cmd = cc.split() + source = "{}/test_load_cubin.cu".format(self.cache_dir) + file_path = self.cache_dir + "test_load_cubin" + with open(source, "w") as f: + f.write(code) + if not cupy.cuda.runtime.is_hip: + if ext == "cubin": + file_path += ".cubin" + flag = "-cubin" + elif ext == "ptx": + file_path += ".ptx" + flag = "-ptx" + else: + raise ValueError + else: + file_path += ".hsaco" + flag = "--genco" + cmd += [arch, flag, source, "-o", file_path] + cc = "nvcc" if not cupy.cuda.runtime.is_hip else "hipcc" + compiler._run_cc(cmd, self.cache_dir, cc) + + return file_path + + @unittest.skipIf(cupy.cuda.runtime.is_hip, "HIP uses hsaco, not cubin") + def test_load_cubin(self): + # generate cubin in the temp dir + file_path = self._generate_file("cubin") + + # load cubin and test the kernel + mod = cupy.RawModule(path=file_path, backend=self.backend) + ker = mod.get_function("test_div") + x1, x2, y = self._helper(ker, cupy.float32) + assert cupy.allclose(y, x1 / (x2 + 1.0)) + + @unittest.skipIf(cupy.cuda.runtime.is_hip, "HIP uses hsaco, not ptx") + def test_load_ptx(self): + # use nvcc to generate ptx in the temp dir + self._check_ptx_loadable("nvcc") + file_path = self._generate_file("ptx") + + # load ptx and test the kernel + mod = cupy.RawModule(path=file_path, backend=self.backend) + ker = mod.get_function("test_div") + x1, x2, y = self._helper(ker, cupy.float32) + assert cupy.allclose(y, x1 / (x2 + 1.0)) + + @unittest.skipIf( + not cupy.cuda.runtime.is_hip, "CUDA uses cubin/ptx, not hsaco" + ) + def test_load_hsaco(self): + # generate hsaco in the temp dir + file_path = self._generate_file("hsaco") + + # load cubin and test the kernel + mod = cupy.RawModule(path=file_path, backend=self.backend) + ker = mod.get_function("test_div") + x1, x2, y = self._helper(ker, 
cupy.float32) + assert cupy.allclose(y, x1 / (x2 + 1.0)) + + def test_module_load_failure(self): + # in principle this test is better done in test_driver.py, but + # this error is more likely to appear when using RawModule, so + # let us do it here + with pytest.raises(cupy.cuda.driver.CUDADriverError) as ex: + mod = cupy.RawModule( + path=os.path.expanduser("~/this_does_not_exist.cubin"), + backend=self.backend, + ) + mod.get_function("nonexisting_kernel") # enforce loading + assert "CUDA_ERROR_FILE_NOT_FOUND" in str( + ex.value + ) or "hipErrorFileNotFound" in str( # CUDA + ex.value + ) # HIP + + def test_module_neither_code_nor_path(self): + with pytest.raises(TypeError): + cupy.RawModule() + + def test_module_both_code_and_path(self): + with pytest.raises(TypeError): + cupy.RawModule(code=_test_source1, path="test.cubin") + + def test_get_function_failure(self): + # in principle this test is better done in test_driver.py, but + # this error is more likely to appear when using RawModule, so + # let us do it here + with pytest.raises(cupy.cuda.driver.CUDADriverError) as ex: + self.mod2.get_function("no_such_kernel") + assert "CUDA_ERROR_NOT_FOUND" in str( + ex.value + ) or "hipErrorNotFound" in str( # for CUDA + ex.value + ) # for HIP + + @unittest.skipIf( + cupy.cuda.runtime.is_hip, + "ROCm/HIP does not support dynamic parallelism", + ) + def test_dynamical_parallelism(self): + self._check_ptx_loadable("nvrtc") + ker = cupy.RawKernel( + _test_source4, + "test_kernel", + options=("-dc",), + backend=self.backend, + jitify=self.jitify, + ) + N = 169 + inner_chunk = 13 + x = cupy.zeros((N,), dtype=cupy.float32) + ker((1,), (N // inner_chunk,), (x, N, inner_chunk)) + assert (x == 1.0).all() + + def test_dynamical_parallelism_compile_failure(self): + # no option for separate compilation is given should cause an error + ker = cupy.RawKernel( + _test_source4, + "test_kernel", + backend=self.backend, + jitify=self.jitify, + ) + N = 10 + inner_chunk = 2 + x = 
cupy.zeros((N,), dtype=cupy.float32) + use_ptx = os.environ.get("CUPY_COMPILE_WITH_PTX", False) + if self.jitify: + error = cupy.cuda.compiler.JitifyException + elif self.backend == "nvrtc" and ( + use_ptx + or ( + cupy.cuda.driver._is_cuda_python() + and cupy.cuda.runtime.runtimeGetVersion() < 11010 + ) + or ( + not cupy.cuda.driver._is_cuda_python() + and not cupy.cuda.runtime.is_hip + and cupy.cuda.driver.get_build_version() < 11010 + ) + ): + # raised when calling ls.complete() + error = cupy.cuda.driver.CUDADriverError + else: # nvcc, hipcc, hiprtc + error = cupy.cuda.compiler.CompileException + with pytest.raises(error): + ker((1,), (N // inner_chunk,), (x, N, inner_chunk)) + + @unittest.skipIf( + cupy.cuda.runtime.is_hip, "HIP code should not use cuFloatComplex" + ) + def test_cuFloatComplex(self): + N = 100 + block = 32 + grid = (N + block - 1) // block + dtype = cupy.complex64 + + mod = cupy.RawModule( + code=_test_cuComplex, translate_cucomplex=True, jitify=self.jitify + ) + a = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + a = a.astype(dtype) + b = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + b = b.astype(dtype) + c = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + c = c.astype(dtype) + out = cupy.zeros((N,), dtype=dtype) + out_float = cupy.zeros((N,), dtype=cupy.float32) + out_up = cupy.zeros((N,), dtype=cupy.complex128) + + ker = mod.get_function("test_addf") + ker((grid,), (block,), (a, b, out)) + assert (out == a + b).all() + + ker = mod.get_function("test_subf") + ker((grid,), (block,), (a, b, out)) + assert (out == a - b).all() + + ker = mod.get_function("test_mulf") + ker((grid,), (block,), (a, b, out)) + assert cupy.allclose(out, a * b) + + ker = mod.get_function("test_divf") + ker((grid,), (block,), (a, b, out)) + assert (out == a / b).all() + + ker = mod.get_function("test_conjf") + ker((grid,), (block,), (a, out)) + assert (out == cupy.conj(a)).all() + + ker = mod.get_function("test_absf") + ker((grid,), 
(block,), (a, out_float)) + assert (out_float == cupy.abs(a)).all() + + ker = mod.get_function("test_fmaf") + ker((grid,), (block,), (a, b, c, out)) + assert cupy.allclose(out, a * b + c) + + ker = mod.get_function("test_makef") + ker((grid,), (block,), (out,)) + # because of precision issue, the (A==B).all() semantics would fail + assert cupy.allclose(out, 1.8 - 1j * 8.7) + + ker = mod.get_function("test_upcast") + ker((grid,), (block,), (a, out_up)) + assert (out_up == a.astype(cupy.complex128)).all() + + # NumPy scalars. + b = cupy.complex64(2 + 3j) + ker = mod.get_function("test_addf_scalar") + ker((grid,), (block,), (a, b, out)) + assert (out == a + b).all() + + @unittest.skipIf( + cupy.cuda.runtime.is_hip, "HIP code should not use cuDoubleComplex" + ) + def test_cuDoubleComplex(self): + N = 100 + block = 32 + grid = (N + block - 1) // block + dtype = cupy.complex128 + + mod = cupy.RawModule( + code=_test_cuComplex, translate_cucomplex=True, jitify=self.jitify + ) + a = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + a = a.astype(dtype) + b = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + b = b.astype(dtype) + c = cupy.random.random((N,)) + 1j * cupy.random.random((N,)) + c = c.astype(dtype) + out = cupy.zeros((N,), dtype=dtype) + out_float = cupy.zeros((N,), dtype=cupy.float64) + out_down = cupy.zeros((N,), dtype=cupy.complex64) + + ker = mod.get_function("test_add") + ker((grid,), (block,), (a, b, out)) + assert (out == a + b).all() + + ker = mod.get_function("test_sub") + ker((grid,), (block,), (a, b, out)) + assert (out == a - b).all() + + ker = mod.get_function("test_mul") + ker((grid,), (block,), (a, b, out)) + assert cupy.allclose(out, a * b) + + ker = mod.get_function("test_div") + ker((grid,), (block,), (a, b, out)) + assert (out == a / b).all() + + ker = mod.get_function("test_conj") + ker((grid,), (block,), (a, out)) + assert (out == cupy.conj(a)).all() + + ker = mod.get_function("test_abs") + ker((grid,), (block,), (a, 
out_float)) + assert (out_float == cupy.abs(a)).all() + + ker = mod.get_function("test_fma") + ker((grid,), (block,), (a, b, c, out)) + assert cupy.allclose(out, a * b + c) + + ker = mod.get_function("test_make") + ker((grid,), (block,), (out,)) + assert (out == 1.8 - 1j * 8.7).all() + + ker = mod.get_function("test_downcast") + ker((grid,), (block,), (a, out_down)) + assert (out_down == a.astype(cupy.complex64)).all() + + # NumPy scalars. + b = cupy.complex128(2 + 3j) + ker = mod.get_function("test_add_scalar") + ker((grid,), (block,), (a, b, out)) + assert (out == a + b).all() + + # Python scalars. + b = 2 + 3j + ker = mod.get_function("test_add_scalar") + ker((grid,), (block,), (a, b, out)) + assert (out == a + b).all() + + def test_const_memory(self): + mod = cupy.RawModule( + code=test_const_mem, backend=self.backend, jitify=self.jitify + ) + ker = mod.get_function("multiply_by_const") + mem_ptr = mod.get_global("some_array") + const_arr = cupy.ndarray((100,), cupy.float32, mem_ptr) + data = cupy.arange(100, dtype=cupy.float32) + const_arr[...] = data + output_arr = cupy.ones(100, dtype=cupy.float32) + ker((1,), (100,), (output_arr, cupy.int32(100))) + assert (data == output_arr).all() + + def test_template_specialization(self): + if self.backend == "nvcc": + self.skipTest("nvcc does not support template specialization") + + # TODO(leofang): investigate why hiprtc generates a wrong source code + # when the same code is compiled and discarded. It seems hiprtc has + # an internal cache that conflicts with the 2nd compilation attempt. + if cupy.cuda.runtime.is_hip and hasattr(self, "clean_up"): + self.skipTest("skip a potential hiprtc bug") + + # compile code + if cupy.cuda.runtime.is_hip: + # ROCm 5.0 returns HIP_HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID for + # my_sqrt>, so we use thrust::complex + # instead. 
+ name_expressions = [ + "my_sqrt", + "my_sqrt", + "my_sqrt>", + "my_func", + ] + else: + name_expressions = [ + "my_sqrt", + "my_sqrt", + "my_sqrt>", + "my_func", + ] + mod = cupy.RawModule( + code=test_cxx_template, + name_expressions=name_expressions, + jitify=self.jitify, + ) + + dtypes = (cupy.int32, cupy.float32, cupy.complex128, cupy.float64) + for ker_T, dtype in zip(name_expressions, dtypes): + # get specialized kernels + if cupy.cuda.runtime.is_hip: + # TODO(leofang): investigate why getLoweredName has no error + # but returns an empty string for my_sqrt> + mangled_name = mod.module.mapping.get(ker_T) + if mangled_name == "": + continue + ker = mod.get_function(ker_T) + + # prepare inputs & expected outputs + in_arr = cupy.testing.shaped_random((10,), dtype=dtype) + out_arr = in_arr**2 + + # run + ker((1,), (10,), (in_arr, 10)) + + # check results + assert cupy.allclose(in_arr, out_arr) + + def test_template_failure(self): + name_expressions = ["my_sqrt"] + + # 1. nvcc is disabled for this feature + if self.backend == "nvcc": + with pytest.raises(ValueError) as e: + cupy.RawModule( + code=test_cxx_template, + backend=self.backend, + name_expressions=name_expressions, + ) + assert "nvrtc" in str(e.value) + return # the rest of tests do not apply to nvcc + + # 2. compile code without specializations + mod = cupy.RawModule(code=test_cxx_template, jitify=self.jitify) + # ...try to get a specialized kernel + match = ( + "named symbol not found" + if not cupy.cuda.runtime.is_hip + else "hipErrorNotFound" + ) + with pytest.raises(cupy.cuda.driver.CUDADriverError, match=match): + mod.get_function("my_sqrt") + + # 3. 
try to fetch something we didn't specialize for + mod = cupy.RawModule( + code=test_cxx_template, + name_expressions=name_expressions, + jitify=self.jitify, + ) + if cupy.cuda.runtime.is_hip: + msg = "hipErrorNotFound" + else: + msg = "named symbol not found" + with pytest.raises(cupy.cuda.driver.CUDADriverError, match=msg): + mod.get_function("my_sqrt") + + def test_raw_pointer(self): + mod = cupy.RawModule( + code=test_cast, backend=self.backend, jitify=self.jitify + ) + ker = mod.get_function("my_func") + + a = cupy.ones((100,), dtype=cupy.float64) + memptr = memory.alloc(100 * a.dtype.itemsize) + memptr.copy_from(a.data, 100 * a.dtype.itemsize) # one-initialize + b = cupy.ndarray((100,), cupy.float64, memptr=memptr) + + ker((1,), (100,), (memptr, 100)) + a = 3.0 * a - 8.0 + assert (a == b).all() + + @testing.multi_gpu(2) + def test_context_switch_RawKernel(self): + # run test_basic() on another device + + # we need to launch it once to force compiling + x1, x2, y = self._helper(self.kern, cupy.float32) + + with cupy.cuda.Device(1): + x1, x2, y = self._helper(self.kern, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + @testing.multi_gpu(2) + def test_context_switch_RawModule1(self): + # run test_module() on another device + # in this test, re-compiling happens at 2nd get_function() + module = self.mod2 + with cupy.cuda.Device(0): + module.get_function("test_sum") + + with cupy.cuda.Device(1): + ker_sum = module.get_function("test_sum") + x1, x2, y = self._helper(ker_sum, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + @testing.multi_gpu(2) + def test_context_switch_RawModule2(self): + # run test_module() on another device + # in this test, re-compiling happens at kernel launch + module = self.mod2 + with cupy.cuda.Device(0): + ker_sum = module.get_function("test_sum") + + with cupy.cuda.Device(1): + x1, x2, y = self._helper(ker_sum, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + @testing.multi_gpu(2) + def test_context_switch_RawModule3(self): + # 
run test_load_cubin() on another device + # generate cubin in the temp dir and load it on device 0 + + device0 = cupy.cuda.Device(0) + device1 = cupy.cuda.Device(1) + if device0.compute_capability != device1.compute_capability: + raise pytest.skip() + + with device0: + file_path = self._generate_file("cubin") + mod = cupy.RawModule(path=file_path, backend=self.backend) + mod.get_function("test_div") + + # in this test, reloading happens at 2nd get_function() + with device1: + ker = mod.get_function("test_div") + x1, x2, y = self._helper(ker, cupy.float32) + assert cupy.allclose(y, x1 / (x2 + 1.0)) + + @testing.multi_gpu(2) + def test_context_switch_RawModule4(self): + # run test_load_cubin() on another device + # generate cubin in the temp dir and load it on device 0 + + device0 = cupy.cuda.Device(0) + device1 = cupy.cuda.Device(1) + if device0.compute_capability != device1.compute_capability: + raise pytest.skip() + + with device0: + file_path = self._generate_file("cubin") + mod = cupy.RawModule(path=file_path, backend=self.backend) + ker = mod.get_function("test_div") + + # in this test, reloading happens at kernel launch + with device1: + x1, x2, y = self._helper(ker, cupy.float32) + assert cupy.allclose(y, x1 / (x2 + 1.0)) + + @testing.multi_gpu(2) + def test_context_switch_RawModule5(self): + # run test_template_specialization() on another device + # in this test, re-compiling happens at get_function() + if self.backend == "nvcc": + self.skipTest("nvcc does not support template specialization") + + # compile code + name_expressions = ["my_sqrt"] + name = name_expressions[0] + with cupy.cuda.Device(0): + mod = cupy.RawModule( + code=test_cxx_template, + name_expressions=name_expressions, + jitify=self.jitify, + ) + + # get specialized kernels + mod.get_function(name) + + # switch device + with cupy.cuda.Device(1): + # get specialized kernels + ker = mod.get_function(name) + + # prepare inputs & expected outputs + in_arr = cupy.testing.shaped_random((10,), 
dtype=cupy.uint32) + out_arr = in_arr**2 + + # run + ker((1,), (10,), (in_arr, 10)) + + # check results + assert cupy.allclose(in_arr, out_arr) + + @testing.multi_gpu(2) + def test_context_switch_RawModule6(self): + # run test_template_specialization() on another device + # in this test, re-compiling happens at kernel launch + if self.backend == "nvcc": + self.skipTest("nvcc does not support template specialization") + + # compile code + name_expressions = ["my_sqrt"] + name = name_expressions[0] + with cupy.cuda.Device(0): + mod = cupy.RawModule( + code=test_cxx_template, + name_expressions=name_expressions, + jitify=self.jitify, + ) + + # get specialized kernels + ker = mod.get_function(name) + + # switch device + with cupy.cuda.Device(1): + # prepare inputs & expected outputs + in_arr = cupy.testing.shaped_random((10,), dtype=cupy.uint32) + out_arr = in_arr**2 + + # run + ker((1,), (10,), (in_arr, 10)) + + # check results + assert cupy.allclose(in_arr, out_arr) + + @unittest.skipUnless( + not cupy.cuda.runtime.is_hip, "only CUDA raises warning" + ) + def test_compile_kernel(self): + kern = cupy.RawKernel( + _test_compile_src, + "test_op", + options=("-DOP=+",), + backend=self.backend, + jitify=self.jitify, + ) + log = io.StringIO() + with use_temporary_cache_dir(): + kern.compile(log_stream=log) + assert "warning" in log.getvalue() + x1, x2, y = self._helper(kern, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + @unittest.skipUnless( + not cupy.cuda.runtime.is_hip, "only CUDA raises warning" + ) + def test_compile_module(self): + module = cupy.RawModule( + code=_test_compile_src, + backend=self.backend, + options=("-DOP=+",), + jitify=self.jitify, + ) + log = io.StringIO() + with use_temporary_cache_dir(): + module.compile(log_stream=log) + assert "warning" in log.getvalue() + kern = module.get_function("test_op") + x1, x2, y = self._helper(kern, cupy.float32) + assert cupy.allclose(y, x1 + x2) + + +_test_grid_sync = r""" +#include + +extern "C" __global__ 
+void test_grid_sync(const float* x1, const float* x2, float* y, int n) { + namespace cg = cooperative_groups; + cg::grid_group grid = cg::this_grid(); + int size = gridDim.x * blockDim.x; + int tid = blockDim.x * blockIdx.x + threadIdx.x; + for (int i = tid; i < n; i += size) { + y[i] = x1[i]; + } + cg::sync(grid); + for (int i = n - 1 - tid; i >= 0; i -= size) { + y[i] += x2[i]; + } +} +""" + + +@testing.parameterize( + *testing.product( + { + "n": [10, 100, 1000], + "block": [64, 256], + } + ) +) +@unittest.skipIf( + find_nvcc_ver() >= 12020, "fp16 header compatibility issue, see cupy#8412" +) +@unittest.skipUnless( + 9000 <= cupy.cuda.runtime.runtimeGetVersion(), "Requires CUDA 9.x or later" +) +@unittest.skipUnless( + 60 <= int(cupy.cuda.device.get_compute_capability()), + "Requires compute capability 6.0 or later", +) +@unittest.skipIf(cupy.cuda.runtime.is_hip, "Skip on HIP") +class TestRawGridSync(unittest.TestCase): + + def test_grid_sync_rawkernel(self): + n = self.n + with use_temporary_cache_dir(): + kern_grid_sync = cupy.RawKernel( + _test_grid_sync, + "test_grid_sync", + backend="nvcc", + enable_cooperative_groups=True, + ) + x1 = cupy.arange(n**2, dtype="float32").reshape(n, n) + x2 = cupy.ones((n, n), dtype="float32") + y = cupy.zeros((n, n), dtype="float32") + block = self.block + grid = (n * n + block - 1) // block + kern_grid_sync((grid,), (block,), (x1, x2, y, n**2)) + assert cupy.allclose(y, x1 + x2) + + def test_grid_sync_rawmodule(self): + n = self.n + with use_temporary_cache_dir(): + mod_grid_sync = cupy.RawModule( + code=_test_grid_sync, + backend="nvcc", + enable_cooperative_groups=True, + ) + x1 = cupy.arange(n**2, dtype="float32").reshape(n, n) + x2 = cupy.ones((n, n), dtype="float32") + y = cupy.zeros((n, n), dtype="float32") + kern = mod_grid_sync.get_function("test_grid_sync") + block = self.block + grid = (n * n + block - 1) // block + kern((grid,), (block,), (x1, x2, y, n**2)) + assert cupy.allclose(y, x1 + x2) + + +_test_script = 
r""" +import pickle +import sys + +import cupy as cp + + +N = 100 +a = cp.random.random(N, dtype=cp.float32) +b = cp.random.random(N, dtype=cp.float32) +c = cp.empty_like(a) +with open('raw.pkl', 'rb') as f: + ker = pickle.load(f) + +if len(sys.argv) == 2: + ker = ker.get_function(sys.argv[1]) + +ker((1,), (100,), (a, b, c, N)) +assert cp.allclose(a + b, c) +assert ker.enable_cooperative_groups +""" + + +# Pickling/unpickling a RawModule should always success, whereas +# pickling/unpickling a RawKernel would fail if we don't enforce +# recompiling after unpickling it. +@testing.parameterize( + *testing.product( + { + "compile": (False, True), + "raw": ("ker", "mod", "mod_ker"), + } + ) +) +@unittest.skipUnless( + 60 <= int(cupy.cuda.device.get_compute_capability()), + "Requires compute capability 6.0 or later", +) +@unittest.skipIf( + cupy.cuda.runtime.is_hip, "HIP does not support enable_cooperative_groups" +) +class TestRawPicklable(unittest.TestCase): + + def setUp(self): + self.temporary_dir_context = use_temporary_cache_dir() + self.temp_dir = self.temporary_dir_context.__enter__() + + # test if kw-only arguments are properly handled or not + if self.raw == "ker": + self.ker = cupy.RawKernel( + _test_source1, + "test_sum", + backend="nvcc", + enable_cooperative_groups=True, + ) + else: + self.mod = cupy.RawModule( + code=_test_source1, + backend="nvcc", + enable_cooperative_groups=True, + ) + + def tearDown(self): + self.temporary_dir_context.__exit__(*sys.exc_info()) + + def _helper(self): + N = 10 + x1 = cupy.arange(N**2, dtype=cupy.float32).reshape(N, N) + x2 = cupy.ones((N, N), dtype=cupy.float32) + y = cupy.zeros((N, N), dtype=cupy.float32) + if self.raw == "ker": + ker = self.ker + else: + ker = self.mod.get_function("test_sum") + ker((N,), (N,), (x1, x2, y, N**2)) + assert cupy.allclose(x1 + x2, y) + + def test_raw_picklable(self): + # force compiling before pickling + if self.compile: + self._helper() + + if self.raw == "ker": + # pickle the RawKernel 
+ obj = self.ker + elif self.raw == "mod": + # pickle the RawModule + obj = self.mod + elif self.raw == "mod_ker": + # pickle the RawKernel fetched from the RawModule + obj = self.mod.get_function("test_sum") + with open(self.temp_dir + "/raw.pkl", "wb") as f: + pickle.dump(obj, f) + + # dump test script to temp dir + with open(self.temp_dir + "/TestRawPicklable.py", "w") as f: + f.write(_test_script) + test_args = ["test_sum"] if self.raw == "mod" else [] + + # run another process to check the pickle + s = subprocess.run( + [sys.executable, "TestRawPicklable.py"] + test_args, + cwd=self.temp_dir, + ) + s.check_returncode() # raise if unsuccessful + + +# a slightly more realistic kernel involving std utilities +std_code = r""" +#include + +template::value>::type> +__global__ void shift (T* a, int N) { + unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < N) { + a[tid] += 100; + } +} +""" + + +@testing.parameterize( + *testing.product( + { + "jitify": (False, True), + } + ) +) +@unittest.skipIf(cupy.cuda.runtime.is_hip, "Jitify does not support ROCm/HIP") +class TestRawJitify(unittest.TestCase): + + def setUp(self): + self.temporary_dir_context = use_temporary_cache_dir() + self.temp_dir = self.temporary_dir_context.__enter__() + + def tearDown(self): + self.temporary_dir_context.__exit__(*sys.exc_info()) + + def _helper(self, header, options=()): + code = header + code += _test_source1 + mod1 = cupy.RawModule( + code=code, backend="nvrtc", options=options, jitify=self.jitify + ) + + N = 10 + x1 = cupy.arange(N**2, dtype=cupy.float32).reshape(N, N) + x2 = cupy.ones((N, N), dtype=cupy.float32) + y = cupy.zeros((N, N), dtype=cupy.float32) + ker = mod1.get_function("test_sum") + ker((N,), (N,), (x1, x2, y, N**2)) + assert cupy.allclose(x1 + x2, y) + + def _helper2(self, type_str): + mod2 = cupy.RawModule( + code=std_code, + jitify=self.jitify, + name_expressions=("shift<%s>" % type_str,), + ) + ker = mod2.get_function("shift<%s>" % type_str) + N = 256 
+ a = cupy.random.random_integers(0, 7, N).astype(cupy.int32) + b = a.copy() + ker((1,), (N,), (a, N)) + assert cupy.allclose(a, b + 100) + + def test_jitify1(self): + # simply prepend an unused header + hdr = "#include \n" + # Starting CUDA 12.2, fp16/bf16 headers are intertwined, but due to + # license issue we can't yet bundle bf16 headers. CUB offers us a + # band-aid solution to avoid including the latter (NVIDIA/cub#478, + # nvbugs 3641496). + options = ("-DCUB_DISABLE_BF16_SUPPORT",) + + # Compiling CUB headers now works with or without Jitify. + self._helper(hdr, options) + + def test_jitify2(self): + # NVRTC cannot compile any code involving std + if self.jitify: + # Jitify will make it work + self._helper2("int") + else: + with pytest.raises(cupy.cuda.compiler.CompileException) as ex: + self._helper2("int") + assert "cannot open source file" in str(ex.value) + + def test_jitify3(self): + # We supply a type impossible to specialize. Jitify is still able to + # locate the headers, but when it comes to the actual compilation, + # NVRTC fails (raising the same exception) with different error + # messages. 
+ ex_type = cupy.cuda.compiler.CompileException + with pytest.raises(ex_type) as ex: + self._helper2("float") + if self.jitify: + assert "Error in parsing name expression" in str(ex.value) + else: + assert "cannot open source file" in str(ex.value) + + def test_jitify4(self): + # ensure JitifyException is raised with a broken code + code = r""" + __global__ void i_am_broken() { + """ + + if self.jitify: + ex_type = cupy.cuda.compiler.JitifyException + else: + ex_type = cupy.cuda.compiler.CompileException + + with pytest.raises(ex_type): + mod = cupy.RawModule(code=code, jitify=self.jitify) + ker = mod.get_function("i_am_broken") + # if Jitify could redirect its output, we would be able to check + # the error log here as well (NVIDIA/jitify#79) + + def test_jitify5(self): + # If including a header that does not exist, Jitify would attempt to + # comment it out and proceed. If this header is actually unused, then + # everything would run just fine. + + hdr = "I_INCLUDE_SOMETHING.h" + with open(self.temp_dir + "/" + hdr, "w") as f: + dummy = "#include \n" + f.write(dummy) + hdr = '#include "' + hdr + '"\n' + + if self.jitify: + # Jitify would print a warning "[jitify] File not found" to stdout, + # but as mentioned above and elsewhere, we can't capture it. 
+ self._helper(hdr, options=("-I" + self.temp_dir,)) + else: + with pytest.raises(cupy.cuda.compiler.CompileException) as ex: + self._helper(hdr, options=("-I" + self.temp_dir,)) + assert "cannot open source file" in str(ex.value) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_reduction.py b/dpnp/tests/third_party/cupy/core_tests/test_reduction.py new file mode 100644 index 000000000000..0b97bc04ad06 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_reduction.py @@ -0,0 +1,260 @@ +import unittest + +import numpy +import pytest +from dpctl.tensor._numpy_helper import AxisError + +import dpnp as cupy + +# import cupy._core._accelerator as _acc +# from cupy import _core +from dpnp.tests.third_party.cupy import testing + +if numpy.lib.NumpyVersion(numpy.__version__) >= "2.0.0b1": + from numpy.exceptions import ComplexWarning +else: + from numpy import ComplexWarning + +pytest.skip( + "create/get_reduction_func() and ReductionKernel are not supported", + allow_module_level=True, +) + +_noncontiguous_params = [ + # reduce at head axes + {"shape": (2, 4, 3), "trans": (2, 1, 0), "axis": (0, 1)}, + # reduce at middle axes + {"shape": (2, 4, 5, 3), "trans": (3, 2, 1, 0), "axis": (1, 2)}, + # reduce at tail axes + {"shape": (2, 4, 3), "trans": (2, 1, 0), "axis": (1, 2)}, + # out_axis = (0,) + {"shape": (0, 4, 3), "trans": (2, 1, 0), "axis": (0, 1)}, + # out_axis = () + {"shape": (2, 4, 3), "trans": (2, 1, 0), "axis": (0, 1, 2)}, +] + + +class AbstractReductionTestBase: + + def get_sum_func(self): + raise NotImplementedError() + + @testing.numpy_cupy_allclose(contiguous_check=False) + def check_int8_sum(self, shape, xp, axis=None, keepdims=False, trans=None): + a = testing.shaped_random(shape, xp, "b") + if trans: + a = a.transpose(*trans) + sum_func = self.get_sum_func() + if xp == cupy: + return sum_func(a, axis=axis, keepdims=keepdims) + else: + return a.sum(axis=axis, keepdims=keepdims, dtype="b") + + +class 
SimpleReductionFunctionTestBase(AbstractReductionTestBase): + + def get_sum_func(self): + return _core.create_reduction_func( + "my_sum", ("b->b",), ("in0", "a + b", "out0 = a", None), 0 + ) + + +class TestSimpleReductionFunction( + unittest.TestCase, SimpleReductionFunctionTestBase +): + def test_shape1(self): + for i in range(1, 10): + self.check_int8_sum((2**i,)) + self.check_int8_sum((2**i - 1,)) + self.check_int8_sum((2**i + 1,)) + + def test_shape2(self): + for i in range(1, 10): + self.check_int8_sum((2**i, 1000), axis=0) + self.check_int8_sum((2**i - 1, 1000), axis=0) + self.check_int8_sum((2**i + 1, 1000), axis=0) + + def test_shape3(self): + for i in range(1, 10): + self.check_int8_sum((2**i, 1000), axis=1) + self.check_int8_sum((2**i - 1, 1000), axis=1) + self.check_int8_sum((2**i + 1, 1000), axis=1) + + def test_shape4(self): + self.check_int8_sum((512, 256 * 256), axis=0) + self.check_int8_sum((512, 256 * 256), axis=1) + + self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=0) + self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=1) + + def test_shape5(self): + block_size = 512 + size = (2 << 32) // block_size + self.check_int8_sum((size, 1), axis=1) + self.check_int8_sum((size, 1), axis=0) + + +@testing.parameterize(*_noncontiguous_params) +class TestSimpleReductionFunctionNonContiguous( + SimpleReductionFunctionTestBase, unittest.TestCase +): + + def test_noncontiguous(self): + self.check_int8_sum(self.shape, trans=self.trans, axis=self.axis) + + +@testing.parameterize( + *testing.product( + { + "backend": ([], ["cub"]), + } + ) +) +class TestSimpleReductionFunctionComplexWarning(unittest.TestCase): + + def setUp(self): + self.accelerators = _core.get_reduction_accelerators() + _core.set_reduction_accelerators(self.backend) + + def tearDown(self): + _core.set_reduction_accelerators(self.accelerators) + + @testing.for_complex_dtypes(name="c_dtype") + @testing.for_float_dtypes(name="f_dtype") + @testing.numpy_cupy_allclose() + def test_warns(self, xp, 
c_dtype, f_dtype): + with pytest.warns(ComplexWarning): + out = xp.ones((8,), dtype=c_dtype).sum(dtype=f_dtype) + return out + + +class TestSimpleReductionFunctionInvalidAxis: + @pytest.mark.parametrize( + "axis", + [ + 2, + (-3,), + (0, 7), + ], + ) + def test_axis_overrun(self, axis): + for xp in (numpy, cupy): + a = xp.ones((2, 2)) + with pytest.raises(AxisError): + a.sum(axis=axis) + + @pytest.mark.parametrize( + "axis", + [ + (1, 1), + (0, -2), + ], + ) + def test_axis_repeated(self, axis): + for xp in (numpy, cupy): + a = xp.ones((2, 2)) + with pytest.raises(ValueError): + a.sum(axis=axis) + + +class ReductionKernelTestBase(AbstractReductionTestBase): + + def get_sum_func(self): + return cupy.ReductionKernel( + "T x", "T out", "x", "a + b", "out = a", "0", "my_sum" + ) + + +class TestReductionKernel(ReductionKernelTestBase, unittest.TestCase): + + def test_shape1(self): + for i in range(1, 10): + self.check_int8_sum((2**i,)) + self.check_int8_sum((2**i - 1,)) + self.check_int8_sum((2**i + 1,)) + + def test_shape2(self): + for i in range(1, 10): + self.check_int8_sum((2**i, 1000), axis=0) + self.check_int8_sum((2**i - 1, 1000), axis=0) + self.check_int8_sum((2**i + 1, 1000), axis=0) + + def test_shape3(self): + for i in range(1, 10): + self.check_int8_sum((2**i, 1000), axis=1) + self.check_int8_sum((2**i - 1, 1000), axis=1) + self.check_int8_sum((2**i + 1, 1000), axis=1) + + def test_shape4(self): + self.check_int8_sum((512, 256 * 256), axis=0) + self.check_int8_sum((512, 256 * 256), axis=1) + self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=0) + self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=1) + + +@testing.parameterize(*_noncontiguous_params) +class TestReductionKernelNonContiguous( + ReductionKernelTestBase, unittest.TestCase +): + + def test_noncontiguous(self): + self.check_int8_sum(self.shape, trans=self.trans, axis=self.axis) + + +class TestReductionKernelInvalidArgument(unittest.TestCase): + + def test_invalid_kernel_name(self): + with 
self.assertRaisesRegex(ValueError, "Invalid kernel name"): + cupy.ReductionKernel( + "T x", "T y", "x", "a + b", "y = a", "0", name="1" + ) + + +class TestReductionKernelCachedCode: + + @pytest.fixture(autouse=True) + def setUp(self): + self.old_routine_accelerators = _acc.get_routine_accelerators() + self.old_reduction_accelerators = _acc.get_reduction_accelerators() + # Disable CUB + _acc.set_reduction_accelerators([]) + _acc.set_routine_accelerators([]) + yield + _acc.set_routine_accelerators(self.old_routine_accelerators) + _acc.set_reduction_accelerators(self.old_reduction_accelerators) + + def test_cached_code(self): + kernel = cupy.ReductionKernel( + "T x", "T y", "x", "a + b", "y = a", "0", name="cached_code" + ) + assert len(kernel._cached_codes) == 0 + x = cupy.arange(10) + kernel(x) + assert len(kernel._cached_codes) == 1 + kernel(x) + assert len(kernel._cached_codes) == 1 + kernel(x.astype(cupy.float32)) + assert len(kernel._cached_codes) == 2 + + def test_simple_cached_code(self): + kernel = _core.create_reduction_func( + "my_sum", ("q->q", "f->f"), ("in0", "a + b", "out0 = a", None), 0 + ) + assert len(kernel._cached_codes) == 0 + x = cupy.arange(10) + kernel(x) + assert len(kernel._cached_codes) == 1 + kernel(x) + assert len(kernel._cached_codes) == 1 + kernel(x.astype(cupy.float32)) + assert len(kernel._cached_codes) == 2 + + +class TestLargeMultiDimReduction(ReductionKernelTestBase, unittest.TestCase): + + def test_large_dims_keep_kernels(self): + # This test creates a CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES + # if the output array dims are not reduced + shape = (4, 3, 2, 4, 3, 2, 2) + axis = (1, 4, 3, 6) + self.check_int8_sum(shape, axis=axis, keepdims=True) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_scan.py b/dpnp/tests/third_party/cupy/core_tests/test_scan.py new file mode 100644 index 000000000000..f371a1f6253c --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_scan.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +import unittest + 
+import pytest + +import dpnp as cupy + +# from cupy import cuda +# from cupy._core._routines_math import _scan_for_test as scan +from dpnp.tests.third_party.cupy import testing + +pytest.skip("scan() is not supported", allow_module_level=True) + + +class TestScan(unittest.TestCase): + + @testing.for_all_dtypes() + def test_scan(self, dtype): + element_num = 10000 + + if dtype in {cupy.int8, cupy.uint8, cupy.float16}: + element_num = 100 + + a = cupy.ones((element_num,), dtype=dtype) + prefix_sum = scan(a) + expect = cupy.arange(start=1, stop=element_num + 1).astype(dtype) + + testing.assert_array_equal(prefix_sum, expect) + + def test_check_1d_array(self): + with self.assertRaises(TypeError): + a = cupy.zeros((2, 2)) + scan(a) + + @testing.multi_gpu(2) + def test_multi_gpu(self): + with cuda.Device(0): + a = cupy.zeros((10,)) + scan(a) + with cuda.Device(1): + a = cupy.zeros((10,)) + scan(a) + + @testing.for_all_dtypes() + def test_scan_out(self, dtype): + element_num = 10000 + + if dtype in {cupy.int8, cupy.uint8, cupy.float16}: + element_num = 100 + + a = cupy.ones((element_num,), dtype=dtype) + b = cupy.zeros_like(a) + scan(a, b) + expect = cupy.arange(start=1, stop=element_num + 1).astype(dtype) + + testing.assert_array_equal(b, expect) + + scan(a, a) + testing.assert_array_equal(a, expect) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_syncdetect.py b/dpnp/tests/third_party/cupy/core_tests/test_syncdetect.py new file mode 100644 index 000000000000..57f64a7b1661 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_syncdetect.py @@ -0,0 +1,36 @@ +import unittest + +import pytest + +import dpnp as cupy + +# import cupyx + +pytest.skip("get() method is not supported", allow_module_level=True) + + +class TestSyncDetect(unittest.TestCase): + + def test_disallowed(self): + a = cupy.array([2, 3]) + with cupyx.allow_synchronize(False): + with pytest.raises(cupyx.DeviceSynchronized): + a.get() + + def test_allowed(self): + a = cupy.array([2, 3]) + 
with cupyx.allow_synchronize(True): + a.get() + + def test_nested_disallowed(self): + a = cupy.array([2, 3]) + with cupyx.allow_synchronize(True): + with cupyx.allow_synchronize(False): + with pytest.raises(cupyx.DeviceSynchronized): + a.get() + + def test_nested_allowed(self): + a = cupy.array([2, 3]) + with cupyx.allow_synchronize(False): + with cupyx.allow_synchronize(True): + a.get() diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ufunc_methods.py b/dpnp/tests/third_party/cupy/core_tests/test_ufunc_methods.py new file mode 100644 index 000000000000..387b534d98e2 --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_ufunc_methods.py @@ -0,0 +1,246 @@ +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + + +class TestUfuncOuter: + + @testing.numpy_cupy_array_equal() + def test_add_outer(self, xp): + x = testing.shaped_random((2, 3), xp=xp, dtype=numpy.int32, seed=0) + y = testing.shaped_random((4, 1, 5), xp=xp, dtype=numpy.int32, seed=1) + return xp.add.outer(x, y) + + @pytest.mark.skip("Scalar input is not supported") + @testing.numpy_cupy_array_equal() + def test_add_outer_scalar(self, xp): + return xp.add.outer(2, 3) + + +@pytest.mark.skip("at() method is not supported") +class TestUfuncAtAtomicOps: + + @testing.for_dtypes("iIQefd") + @testing.numpy_cupy_array_equal() + def test_at_add(self, xp, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask)[0] + xp.add.at(x, indices, 3) + return x + + @testing.for_dtypes("iIQefd") + @testing.numpy_cupy_array_equal() + def test_at_add_duplicate_indices(self, xp, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (50,) + x = 
testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + xp.add.at(x, indices, 3) + return x + + @testing.for_dtypes("iI") + @testing.numpy_cupy_array_equal() + def test_at_subtract(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask)[0] + xp.subtract.at(x, indices, 3) + return x + + @testing.for_dtypes("iI") + @testing.numpy_cupy_array_equal() + def test_at_subtract_duplicate_indices(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + xp.subtract.at(x, indices, 3) + return x + + @testing.for_dtypes("iIQfd") + @testing.numpy_cupy_allclose() + def test_at_min(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask)[0] + xp.minimum.at(x, indices, 3) + return x + + @testing.for_dtypes("iIQfd") + @testing.numpy_cupy_allclose() + def test_at_min_duplicate_indices(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + values = testing.shaped_random( + indices.shape, xp=xp, dtype=dtype, seed=2 + ) + xp.minimum.at(x, indices, values) + return x + + @testing.for_dtypes("iIQfd") + @testing.numpy_cupy_allclose() + def test_at_max(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask)[0] + xp.maximum.at(x, indices, 3) + return x + + @testing.for_dtypes("iIQfd") + @testing.numpy_cupy_allclose() + def 
test_at_max_duplicate_indices(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + values = testing.shaped_random( + indices.shape, xp=xp, dtype=dtype, seed=2 + ) + xp.maximum.at(x, indices, values) + return x + + @testing.for_dtypes("iIlLqQ") + @testing.numpy_cupy_array_equal() + def test_at_bitwise_and(self, xp, dtype): + if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq": + pytest.skip("atomicOr does not support int64 in HIP") + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + values = testing.shaped_random( + indices.shape, xp=xp, dtype=dtype, seed=2 + ) + xp.bitwise_and.at(x, indices, values) + return x + + @testing.for_dtypes("iIlLqQ") + @testing.numpy_cupy_array_equal() + def test_at_bitwise_or(self, xp, dtype): + if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq": + pytest.skip("atomicOr does not support int64 in HIP") + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + values = testing.shaped_random( + indices.shape, xp=xp, dtype=dtype, seed=2 + ) + xp.bitwise_or.at(x, indices, values) + return x + + @testing.for_dtypes("iIlLqQ") + @testing.numpy_cupy_array_equal() + def test_at_bitwise_xor(self, xp, dtype): + if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq": + pytest.skip("atomicXor does not support int64 in HIP") + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1 + ) + values = testing.shaped_random( + indices.shape, xp=xp, dtype=dtype, seed=2 + ) + xp.bitwise_xor.at(x, indices, values) + return x + + 
@testing.for_dtypes("iIQefd") + @testing.numpy_cupy_array_equal() + def test_at_boolean_mask(self, xp, dtype): + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + xp.add.at(x, mask, 3) + return x + + @testing.for_dtypes("iIQefd") + @testing.numpy_cupy_array_equal() + def test_at_array_values(self, xp, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (50,) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask)[0] + values = testing.shaped_random( + indices.shape, xp=xp, dtype=numpy.int32, seed=2 + ) + xp.add.at(x, indices, values) + return x + + @testing.for_dtypes("iIQefd") + @testing.numpy_cupy_array_equal() + def test_at_multi_dimensional(self, xp, dtype): + if cupy.cuda.runtime.is_hip and dtype == numpy.float16: + pytest.skip("atomicAdd does not support float16 in HIP") + shape = (20, 30) + x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0) + mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1) + indices = xp.nonzero(mask) + xp.add.at(x, indices, 3) + return x + + +@pytest.mark.skip("reduce() method is not supported") +class TestUfuncReduce: + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6}) + def test_reduce_add(self, xp, dtype): + x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0) + return xp.add.reduce(x, axis=-1) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6}) + def test_multiply_add(self, xp, dtype): + x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0) + return xp.multiply.reduce(x, axis=-1) + + +@pytest.mark.skip("accumulate() method is not supported") +class TestUfuncAccumulate: + + @testing.for_all_dtypes() + 
@testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6}) + def test_reduce_add(self, xp, dtype): + x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0) + return xp.add.accumulate(x, axis=-1) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6}) + def test_multiply_add(self, xp, dtype): + x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0) + return xp.multiply.accumulate(x, axis=-1) + + +@pytest.mark.skip("reduceat() method is not supported") +class TestUfuncReduceAt: + + @testing.for_all_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_reduce_add(self, xp, dtype): + x = testing.shaped_random((3, 4, 5), xp=xp, dtype=dtype, seed=0) + indices = testing.shaped_random( + (20,), xp=xp, dtype=numpy.int32, scale=4, seed=1 + ) + return xp.add.reduceat(x, indices, axis=1) diff --git a/dpnp/tests/third_party/cupy/core_tests/test_userkernel.py b/dpnp/tests/third_party/cupy/core_tests/test_userkernel.py new file mode 100644 index 000000000000..ab184c4939ff --- /dev/null +++ b/dpnp/tests/third_party/cupy/core_tests/test_userkernel.py @@ -0,0 +1,392 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# from cupy.cuda import runtime +# from cupy.cuda.texture import (ChannelFormatDescriptor, CUDAarray, +# ResourceDescriptor, TextureDescriptor, +# TextureObject,) + +pytest.skip("ElementwiseKernel() is not supported", allow_module_level=True) + + +class TestUserkernel(unittest.TestCase): + + def test_manual_indexing(self, n=100): + in1 = cupy.random.uniform(-1, 1, n).astype(cupy.float32) + in2 = cupy.random.uniform(-1, 1, n).astype(cupy.float32) + uesr_kernel_1 = cupy.ElementwiseKernel( + "T x, T y", + "T z", + """ + z = x + y; + """, + "uesr_kernel_1", + ) + out1 = uesr_kernel_1(in1, in2) + + uesr_kernel_2 = cupy.ElementwiseKernel( + "raw T x, raw T y", + "raw T z", + """ + z[i] = x[i] + y[i]; + 
""", + "uesr_kernel_2", + ) + out2 = uesr_kernel_2(in1, in2, size=n) + + testing.assert_array_equal(out1, out2) + + def test_python_scalar(self): + for typ in (int, float, bool): + dtype = numpy.dtype(typ).type + in1_cpu = numpy.random.randint(0, 1, (4, 5)).astype(dtype) + in1 = cupy.array(in1_cpu) + scalar_value = typ(2) + uesr_kernel_1 = cupy.ElementwiseKernel( + "T x, T y", + "T z", + """ + z = x + y; + """, + "uesr_kernel_1", + ) + out1 = uesr_kernel_1(in1, scalar_value) + + expected = in1_cpu + dtype(2) + testing.assert_array_equal(out1, expected) + + @testing.for_all_dtypes() + def test_numpy_scalar(self, dtype): + in1_cpu = numpy.random.randint(0, 1, (4, 5)).astype(dtype) + in1 = cupy.array(in1_cpu) + scalar_value = dtype(2) + uesr_kernel_1 = cupy.ElementwiseKernel( + "T x, T y", + "T z", + """ + z = x + y; + """, + "uesr_kernel_1", + ) + out1 = uesr_kernel_1(in1, scalar_value) + + expected = in1_cpu + dtype(2) + testing.assert_array_equal(out1, expected) + + def test_cached_code(self): + in1 = cupy.random.uniform(-1, 1, 100).astype(cupy.float32) + in2 = cupy.random.uniform(-1, 1, 100).astype(cupy.float32) + user_kernel_1 = cupy.ElementwiseKernel( + "T x, T y", + "T z", + """ + z = x + y; + """, + "uesr_kernel_1", + ) + assert len(user_kernel_1._cached_codes) == 0 + user_kernel_1(in1, in2) + assert len(user_kernel_1._cached_codes) == 1 + user_kernel_1(in1, in2) + assert len(user_kernel_1._cached_codes) == 1 + user_kernel_1(in1.astype(cupy.float64), in2.astype(cupy.float64)) + assert len(user_kernel_1._cached_codes) == 2 + + +class TestElementwiseKernelSize(unittest.TestCase): + # Tests to check whether size argument raises ValueError correctly + # depending on the raw specifiers of a user kernel. 
+ + def setUp(self): + self.arr1 = cupy.array([1, 2], dtype="float32") + self.arr2 = cupy.array([3, 4], dtype="float32") + + def raises_size_not_allowed(self): + return pytest.raises(ValueError, match=r"^Specified \'size\' can") + + def raises_size_required(self): + return pytest.raises(ValueError, match=r"^Loop size is undecided\.") + + def create_kernel(self, input_raw, output_raw): + # Creates a no-op kernel with given parameter specification. + # input_raw and output_raw are tuples of True/False whose + # corresponding parameter will be designated as 'raw' if True. + input_types = ", ".join( + [ + "{}float32 x{}".format(("raw " if raw else ""), i) + for i, raw in enumerate(input_raw) + ] + ) + output_types = ", ".join( + [ + "{}float32 y{}".format(("raw " if raw else ""), i) + for i, raw in enumerate(output_raw) + ] + ) + return cupy.ElementwiseKernel(input_types, output_types, "", "kernel") + + def test_all_raws(self): + # Input arrays are all raw -> size required + kernel1 = self.create_kernel((True, True), (False,)) + kernel1(self.arr1, self.arr2, size=2) + with self.raises_size_required(): + kernel1(self.arr1, self.arr2) + kernel2 = self.create_kernel((True, True), (True,)) + kernel2(self.arr1, self.arr2, size=2) + with self.raises_size_required(): + kernel2(self.arr1, self.arr2) + + def test_all_nonraws(self): + # All arrays are not raw -> size not allowed + kernel1 = self.create_kernel((False, False), (False,)) + with self.raises_size_not_allowed(): + kernel1(self.arr1, self.arr2, size=2) + kernel2 = self.create_kernel((False, False), (True,)) + with self.raises_size_not_allowed(): + kernel2(self.arr1, self.arr2, size=2) + + def test_some_nonraws(self): + # Some arrays are not raw -> size not allowed + kernel1 = self.create_kernel((True, False), (False,)) + with self.raises_size_not_allowed(): + kernel1(self.arr1, self.arr2, size=2) + kernel2 = self.create_kernel((False, True), (False,)) + with self.raises_size_not_allowed(): + kernel2(self.arr1, 
self.arr2, size=2) + kernel3 = self.create_kernel((True, False), (True,)) + with self.raises_size_not_allowed(): + kernel3(self.arr1, self.arr2, size=2) + kernel4 = self.create_kernel((False, True), (True,)) + with self.raises_size_not_allowed(): + kernel4(self.arr1, self.arr2, size=2) + + def test_scalars_and_nonraws(self): + # Combination of scalars and non-raw arrays -> size not allowed + kernel1 = self.create_kernel((False, False), (False,)) + with self.raises_size_not_allowed(): + kernel1(self.arr1, 7, size=2) + kernel2 = self.create_kernel((False, False), (False,)) + with self.raises_size_not_allowed(): + kernel2(7, self.arr1, size=2) + kernel3 = self.create_kernel((False, False), (True,)) + with self.raises_size_not_allowed(): + kernel3(self.arr1, 7, size=2) + kernel4 = self.create_kernel((False, False), (True,)) + with self.raises_size_not_allowed(): + kernel4(7, self.arr1, size=2) + + def test_scalars_and_raws_and_nonraws(self): + # Combination of scalars and raw arrays and non-raw arrays + # -> size not allowed + kernel1 = self.create_kernel((False, False, True), (False,)) + with self.raises_size_not_allowed(): + kernel1(self.arr1, 7, self.arr2, size=2) + kernel2 = self.create_kernel((False, False, True), (True,)) + with self.raises_size_not_allowed(): + kernel2(self.arr1, 7, self.arr2, size=2) + + def test_scalars_and_raws(self): + # Combination of scalars and raw arrays -> size required + kernel1 = self.create_kernel((True, False), (False,)) + kernel1(self.arr1, 7, size=2) + with self.raises_size_required(): + kernel1(self.arr1, 7) + kernel2 = self.create_kernel((False, True), (False,)) + kernel2(7, self.arr1, size=2) + with self.raises_size_required(): + kernel2(7, self.arr1) + kernel3 = self.create_kernel((True, False), (True,)) + kernel3(self.arr1, 7, size=2) + with self.raises_size_required(): + kernel3(self.arr1, 7) + kernel4 = self.create_kernel((False, True), (True,)) + kernel4(7, self.arr1, size=2) + with self.raises_size_required(): + 
kernel4(7, self.arr1) + + def test_size_determined_by_output(self): + # All the input args are unsized, but the size can be determined by the + # output arg. size argument is not allowed. + + # Raw input + kernel1 = self.create_kernel((True,), (False,)) + kernel1(self.arr1, self.arr2) + with self.raises_size_not_allowed(): + kernel1(self.arr1, self.arr2, size=2) + + # Scalar input + kernel2 = self.create_kernel((False,), (False,)) + kernel2(self.arr1, self.arr2) + with self.raises_size_not_allowed(): + kernel2(7, self.arr2, size=2) + + # No input + kernel3 = self.create_kernel((), (False,)) + kernel3(self.arr1) + with self.raises_size_not_allowed(): + kernel3(self.arr1, size=2) + + def test_no_input_and_raw_output(self): + # No input and the given output is raw -> size required + kernel1 = self.create_kernel((), (True,)) + kernel1(self.arr1, size=2) + with self.raises_size_required(): + kernel1(self.arr1) + + +@testing.parameterize( + *testing.product( + { + "value": [-1, 2**32, 2**63 - 1, -(2**63)], + } + ) +) +class TestUserkernelScalar(unittest.TestCase): + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_scalar(self, xp, dtype): + x = testing.shaped_arange((2, 3, 4), xp, dtype) + if xp is numpy: + y = numpy.array(self.value).astype(dtype) + return x + y + else: + kernel = cupy.ElementwiseKernel("T x, T y", "T z", "z = x + y") + return kernel(x, self.value) + + +class TestUserkernelManualBlockSize(unittest.TestCase): + + def test_invalid_block_size(self): + x = testing.shaped_arange((2, 3, 4), cupy, cupy.float32) + kernel = cupy.ElementwiseKernel("T x, T y", "T z", "z = x + y") + with pytest.raises(ValueError): + kernel(x, 1, block_size=0) + + def test_block_size(self): + x = testing.shaped_arange((2, 3, 4), cupy, cupy.float32) + kernel = cupy.ElementwiseKernel("T x, T y", "T z", "z = x + y") + y = kernel(x, 1, block_size=1) + testing.assert_array_equal(y, x + 1) + + +@testing.parameterize( + *testing.product( + { + "dimensions": ((64, 
0, 0), (64, 32, 0), (64, 32, 19)), + } + ) +) +@pytest.mark.skipif( + runtime.is_hip, reason="texture support on HIP is not yet implemented" +) +class TestElementwiseKernelTexture(unittest.TestCase): + + def _prep_texture(self): + width, height, depth = self.dimensions + dim = 3 if depth != 0 else 2 if height != 0 else 1 + + # generate input data and allocate output buffer + shape = ( + (depth, height, width) + if dim == 3 + else (height, width) if dim == 2 else (width,) + ) + self.shape = shape + + # prepare input, output, and texture memory + # self.data holds the data stored in the texture memory + tex_data = cupy.random.random(shape, dtype=cupy.float32) + ch = ChannelFormatDescriptor( + 32, 0, 0, 0, runtime.cudaChannelFormatKindFloat + ) + arr = CUDAarray(ch, width, height, depth) + arr.copy_from(tex_data) + self.data = tex_data + + # create resource and texture descriptors + res = ResourceDescriptor(runtime.cudaResourceTypeArray, cuArr=arr) + address_mode = ( + runtime.cudaAddressModeClamp, + runtime.cudaAddressModeClamp, + ) + tex = TextureDescriptor( + address_mode, + runtime.cudaFilterModePoint, + runtime.cudaReadModeElementType, + ) + + # create a texture object + return TextureObject(res, tex) + + def _prep_kernel1D(self): + return cupy.ElementwiseKernel( + "T x, U texObj", + "T y", + """ + T temp = tex1D(texObj, + float(i) + ); + y = temp + x; + """, + name="test_tex1D", + ) + + def _prep_kernel2D(self): + return cupy.ElementwiseKernel( + "T x, U texObj, uint64 width", + "T y", + """ + T temp = tex2D(texObj, + (float)(i % width), + (float)(i / width) + ); + y = temp + x; + """, + name="test_tex2D", + ) + + def _prep_kernel3D(self): + return cupy.ElementwiseKernel( + "T x, U texObj, uint64 width, uint64 height", + "T y", + """ + T temp = tex3D(texObj, + (float)((i % (width * height)) % width), + (float)((i % (width * height)) / width), + (float)((i / (width * height))) + ); + y = temp + x; + """, + name="test_tex3D", + ) + + def test_texture_input(self): 
+ width, height, depth = self.dimensions + dim = 3 if depth != 0 else 2 if height != 0 else 1 + + texobj = self._prep_texture() + ker = getattr(self, f"_prep_kernel{dim}D")() + + # prepare input + args = [None, texobj] + size = width + if height > 0: + size *= height + args.append(width) + if depth > 0: + size *= depth + args.append(height) + in_arr = cupy.arange(size, dtype=cupy.float32) + in_arr = in_arr.reshape(self.shape) + args[0] = in_arr + + # compute and validate output + out_arr = ker(*args) + expected = in_arr + self.data + testing.assert_allclose(out_arr, expected) diff --git a/dpnp/tests/third_party/cupy/creation_tests/test_basic.py b/dpnp/tests/third_party/cupy/creation_tests/test_basic.py index 8f76696bc5e1..e29dd668b0d8 100644 --- a/dpnp/tests/third_party/cupy/creation_tests/test_basic.py +++ b/dpnp/tests/third_party/cupy/creation_tests/test_basic.py @@ -28,6 +28,7 @@ def test_empty_huge_size(self): assert (a == 123).all() # Free huge memory for slow test del a + # cupy.get_default_memory_pool().free_all_blocks() @testing.slow def test_empty_huge_size_fill0(self): @@ -36,6 +37,7 @@ def test_empty_huge_size_fill0(self): assert (a == 0).all() # Free huge memory for slow test del a + # cupy.get_default_memory_pool().free_all_blocks() @testing.for_CF_orders() @testing.for_all_dtypes() @@ -221,8 +223,8 @@ def test_zeros_int(self, xp, dtype, order): @testing.for_CF_orders() def test_zeros_strides(self, order): - a = numpy.zeros((2, 3), dtype="f", order=order) - b = cupy.zeros((2, 3), dtype="f", order=order) + a = numpy.zeros((2, 3), dtype=cupy.default_float_type(), order=order) + b = cupy.zeros((2, 3), dtype=cupy.default_float_type(), order=order) b_strides = tuple(x * b.itemsize for x in b.strides) assert b_strides == a.strides @@ -306,6 +308,7 @@ def test_full_like_subok(self): ) ) class TestBasicReshape: + @testing.with_requires("numpy>=1.17.0") @testing.for_orders("CFAK") @testing.for_all_dtypes() diff --git 
a/dpnp/tests/third_party/cupy/creation_tests/test_from_data.py b/dpnp/tests/third_party/cupy/creation_tests/test_from_data.py index 679f9260fab7..f0ea1a6f996b 100644 --- a/dpnp/tests/third_party/cupy/creation_tests/test_from_data.py +++ b/dpnp/tests/third_party/cupy/creation_tests/test_from_data.py @@ -11,6 +11,7 @@ class TestFromData(unittest.TestCase): + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -491,7 +492,7 @@ def test_asarray_cuda_array_zero_dim(self, xp): @testing.numpy_cupy_array_equal() def test_asarray_cuda_array_zero_dim_dtype(self, xp): - a = xp.ones((), dtype=numpy.float32) + a = xp.ones((), dtype=cupy.default_float_type()) return xp.ascontiguousarray(a, dtype=numpy.int64) @pytest.mark.skip("only native byteorder is supported") @@ -580,7 +581,7 @@ def test_loadtxt(self, xp): fh.seek(0) return xp.loadtxt(fh, dtype="u1") - @pytest.mark.skip("`genfromtxt` isn't supported") + @pytest.mark.skip("genfromtxt() is not supported yet") @testing.numpy_cupy_array_equal() def test_genfromtxt(self, xp): with tempfile.TemporaryFile() as fh: @@ -602,7 +603,7 @@ def test_fromfile_big_endian(self, xp): return a + a -max_cuda_array_interface_version = 1 +max_cuda_array_interface_version = 3 @testing.parameterize( @@ -613,6 +614,7 @@ def test_fromfile_big_endian(self, xp): } ) ) +@pytest.mark.skip("CUDA array interface is not supported") class TestCudaArrayInterface(unittest.TestCase): @testing.for_all_dtypes() def test_base(self, dtype): @@ -622,7 +624,6 @@ def test_base(self, dtype): ) testing.assert_array_equal(a, b) - @pytest.mark.skip("that isn't supported yet") @testing.for_all_dtypes() def test_not_copied(self, dtype): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -661,7 +662,6 @@ def test_with_zero_size_array(self, dtype): assert a.nbytes == b.nbytes assert a.size == 0 - @pytest.mark.skip("that isn't supported yet") @testing.for_all_dtypes() def test_asnumpy(self, dtype): a = testing.shaped_arange((2, 3, 4), 
cupy, dtype) @@ -670,7 +670,6 @@ def test_asnumpy(self, dtype): b_cpu = cupy.asnumpy(b) testing.assert_array_equal(a_cpu, b_cpu) - @pytest.mark.skip("only native byteorder is supported") def test_big_endian(self): a = cupy.array([0x1, 0x0, 0x0, 0x0], dtype=numpy.int8) dtype = numpy.dtype(">i4") @@ -694,9 +693,9 @@ def test_big_endian(self): } ) ) +@pytest.mark.skip("CUDA array interface is not supported") class TestCudaArrayInterfaceMaskedArray(unittest.TestCase): # TODO(leofang): update this test when masked array is supported - @pytest.mark.skip("that isn't supported") @testing.for_all_dtypes() def test_masked_array(self, dtype): a = testing.shaped_arange((2, 3, 4), cupy, dtype) @@ -707,9 +706,10 @@ def test_masked_array(self, dtype): assert "does not support" in str(ex.value) -@pytest.mark.skip() +# marked slow as either numpy or cupy could go OOM in this test +@testing.slow +@pytest.mark.skip("CUDA array interface is not supported") class TestCudaArrayInterfaceBigArray(unittest.TestCase): - @pytest.mark.skip("that isn't supported") def test_with_over_size_array(self): # real example from #3009 size = 5 * 10**8 @@ -732,15 +732,14 @@ def __init__(self, a, ver, include_strides=False, mask=None, stream=None): self.stream = stream @property - def __sycl_usm_array_interface__(self): + def __cuda_array_interface__(self): if self.a is not None: desc = { "shape": self.a.shape, "typestr": self.a.dtype.str, - "data": (self.a.get_array()._pointer, False), - "version": self.a.__sycl_usm_array_interface__["version"], - "syclobj": self.a.sycl_queue, - "offset": self.a.get_array()._element_offset, + "descr": self.a.dtype.descr, + "data": (self.a.data.ptr, False), + "version": self.ver, } if self.a.flags.c_contiguous: if self.include_strides is True: @@ -790,6 +789,7 @@ def __sycl_usm_array_interface__(self): ) ) class TestArrayPreservationOfShape(unittest.TestCase): + @testing.for_all_dtypes() def test_cupy_array(self, dtype): if self.xp is numpy and self.copy is False: @@ 
-814,6 +814,7 @@ def test_cupy_array(self, dtype): ) ) class TestArrayCopy(unittest.TestCase): + @testing.for_all_dtypes() def test_cupy_array(self, dtype): if self.xp is numpy and self.copy is False: @@ -833,6 +834,7 @@ def test_cupy_array(self, dtype): class TestArrayInvalidObject(unittest.TestCase): + def test_invalid_type(self): a = numpy.array([1, 2, 3], dtype=object) with self.assertRaises(TypeError): diff --git a/dpnp/tests/third_party/cupy/creation_tests/test_matrix.py b/dpnp/tests/third_party/cupy/creation_tests/test_matrix.py index fac3b51fdd7c..799ed954c70e 100644 --- a/dpnp/tests/third_party/cupy/creation_tests/test_matrix.py +++ b/dpnp/tests/third_party/cupy/creation_tests/test_matrix.py @@ -4,10 +4,12 @@ import pytest import dpnp as cupy +from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing class TestMatrix(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_diag1(self, xp): a = testing.shaped_arange((3, 3), xp) @@ -107,6 +109,7 @@ def test_diagflat_from_scalar_with_k1(self, xp): {"shape": (4, 3)}, ) class TestTri(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_tri(self, xp, dtype): @@ -130,13 +133,14 @@ def test_tri_posi(self, xp, dtype): {"shape": (2, 3, 4)}, ) class TestTriLowerAndUpper(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_tril(self, xp, dtype): m = testing.shaped_arange(self.shape, xp, dtype) return xp.tril(m) - @pytest.mark.skip("list as input arg is not supported") + @pytest.mark.skip("List input is not supported") @testing.numpy_cupy_array_equal() def test_tril_array_like(self, xp): return xp.tril([[1, 2], [3, 4]]) @@ -159,7 +163,7 @@ def test_triu(self, xp, dtype): m = testing.shaped_arange(self.shape, xp, dtype) return xp.triu(m) - @pytest.mark.skip("list as input arg is not supported") + @pytest.mark.skip("List input is not supported") @testing.numpy_cupy_array_equal() def 
test_triu_array_like(self, xp): return xp.triu([[1, 2], [3, 4]]) @@ -181,8 +185,9 @@ def test_triu_posi(self, xp, dtype): *testing.product({"N": [None, 0, 1, 2, 3], "increasing": [False, True]}) ) class TestVander(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) - @testing.numpy_cupy_allclose(type_check=False) + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_vander(self, xp, dtype): a = testing.shaped_arange((3,), xp, dtype=dtype) return xp.vander(a, N=self.N, increasing=self.increasing) diff --git a/dpnp/tests/third_party/cupy/creation_tests/test_ranges.py b/dpnp/tests/third_party/cupy/creation_tests/test_ranges.py index 72249622334d..b0d209e2570d 100644 --- a/dpnp/tests/third_party/cupy/creation_tests/test_ranges.py +++ b/dpnp/tests/third_party/cupy/creation_tests/test_ranges.py @@ -33,6 +33,7 @@ def wrapper(self, *args, **kwargs): class TestRanges(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_arange(self, xp, dtype): @@ -51,7 +52,7 @@ def test_arange3(self, xp, dtype): @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_arange4(self, xp, dtype): - return xp.arange(20, 2, -3, dtype=dtype) + return xp.arange(20, 2, -3).astype(dtype) @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() @@ -71,8 +72,9 @@ def test_arange7(self, xp, dtype): @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_arange8(self, xp, dtype): - return xp.arange(10, 8, -1, dtype=dtype) + return xp.arange(10, 8, -1).astype(dtype) + @testing.with_requires("numpy>=1.24") def test_arange9(self): for xp in (numpy, cupy): with pytest.raises((ValueError, TypeError)): @@ -119,7 +121,7 @@ def test_linspace_zero_num_no_endopoint_with_retstep(self, xp, dtype): x, step = xp.linspace( 0, 10, 0, dtype=dtype, endpoint=False, retstep=True ) - self.assertTrue(math.isnan(step)) + assert math.isnan(step) return x @testing.with_requires("numpy>=1.18") @@ 
-130,7 +132,7 @@ def test_linspace_one_num_no_endopoint_with_retstep(self, xp, dtype): x, step = xp.linspace( start, stop, 1, dtype=dtype, endpoint=False, retstep=True ) - self.assertEqual(step, stop - start) + assert step == stop - start return x @testing.for_all_dtypes(no_bool=True) @@ -147,7 +149,7 @@ def test_linspace_no_endpoint(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_linspace_with_retstep(self, xp, dtype): x, step = xp.linspace(0, 10, 5, dtype=dtype, retstep=True) - self.assertEqual(step, 2.5) + assert step == 2.5 return x @testing.numpy_cupy_allclose(rtol=1e-4, type_check=has_support_aspect64()) @@ -158,7 +160,7 @@ def test_linspace_no_dtype_int(self, xp): def test_linspace_no_dtype_float(self, xp): return xp.linspace(0.0, 10.0, 50) - @testing.numpy_cupy_array_equal() + @testing.numpy_cupy_allclose() def test_linspace_float_args_with_int_dtype(self, xp): return xp.linspace(0.1, 9.1, 11, dtype=int) @@ -193,10 +195,16 @@ def test_linspace_array_start_stop(self, xp, dtype_range, dtype_out): @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes_combination( - names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + names=("dtype_range", "dtype_out"), + no_bool=True, + no_complex=True, + no_float16=True, + ) + @testing.numpy_cupy_allclose( + rtol={"default": 5e-6, numpy.float16: 1e-2, numpy.float32: 1e-5} ) - @testing.numpy_cupy_allclose(rtol=1e-04) def test_linspace_mixed_start_stop(self, xp, dtype_range, dtype_out): + # TODO (ev-br): np 2.0: check if can re-enable float16 start = 0.0 if xp.dtype(dtype_range).kind in "u": stop = xp.array([100, 16], dtype=dtype_range) @@ -206,10 +214,18 @@ def test_linspace_mixed_start_stop(self, xp, dtype_range, dtype_out): @testing.with_requires("numpy>=1.16") @testing.for_all_dtypes_combination( - names=("dtype_range", "dtype_out"), no_bool=True, no_complex=True + names=("dtype_range", "dtype_out"), + no_bool=True, + no_complex=True, + no_float16=True, + ) + @testing.numpy_cupy_allclose( 
+ rtol={"default": 5e-6, numpy.float16: 1e-2, numpy.float32: 5e-6} ) - @testing.numpy_cupy_allclose(rtol=1e-04) def test_linspace_mixed_start_stop2(self, xp, dtype_range, dtype_out): + # TODO (ev-br): np 2.0: check if can re-enable float16 + # TODO (ev-br): np 2.0: had to bump the default rtol on Windows + # and numpy 1.26+weak promotion from 0 to 5e-6 if xp.dtype(dtype_range).kind in "u": start = xp.array([160, 120], dtype=dtype_range) else: @@ -312,6 +328,7 @@ def test_logspace_array_start_stop_axis1(self, xp, dtype_range, dtype_out): ) ) class TestMeshgrid(unittest.TestCase): + @testing.for_all_dtypes() def test_meshgrid0(self, dtype): out = cupy.meshgrid( @@ -348,6 +365,7 @@ def test_meshgrid3(self, xp, dtype): class TestMgrid(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_mgrid0(self, xp): return xp.mgrid[0:] @@ -380,6 +398,7 @@ def test_mgrid5(self, xp): class TestOgrid(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_ogrid0(self, xp): return xp.ogrid[0:] diff --git a/dpnp/tests/third_party/cupy/fft_tests/test_cache.py b/dpnp/tests/third_party/cupy/fft_tests/test_cache.py new file mode 100644 index 000000000000..dc2a82a23858 --- /dev/null +++ b/dpnp/tests/third_party/cupy/fft_tests/test_cache.py @@ -0,0 +1,510 @@ +import contextlib +import io +import queue +import threading +import unittest + +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# from cupy.cuda import cufft +# from cupy.cuda import device +# from cupy.cuda import runtime +# from cupy.fft import config + +# from .test_fft import (multi_gpu_config, _skip_multi_gpu_bug) + +pytest.skip("FFT cache functions are not supported", allow_module_level=True) + + +def intercept_stdout(func): + with io.StringIO() as buf, contextlib.redirect_stdout(buf): + func() + stdout = buf.getvalue() + return stdout + + +n_devices = runtime.getDeviceCount() + + +class TestPlanCache(unittest.TestCase): + def setUp(self): + self.caches = [] + 
self.old_sizes = [] + for i in range(n_devices): + with device.Device(i): + cache = config.get_plan_cache() + self.old_sizes.append(cache.get_size()) + cache.clear() + cache.set_memsize(-1) + cache.set_size(2) + self.caches.append(cache) + + def tearDown(self): + for i in range(n_devices): + with device.Device(i): + cache = config.get_plan_cache() + cache.clear() + cache.set_size(self.old_sizes[i]) + cache.set_memsize(-1) + + def test_LRU_cache1(self): + # test if insertion and clean-up works + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + + cache.clear() + assert cache.get_curr_size() == 0 <= cache.get_size() + + def test_LRU_cache2(self): + # test if plan is reused + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + # run once and fetch the cached plan + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + iterator = iter(cache) + plan0 = next(iterator)[1].plan + + # repeat + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + iterator = iter(cache) + plan1 = next(iterator)[1].plan + + # we should get the same plan + assert plan0 is plan1 + + def test_LRU_cache3(self): + # test if cache size is limited + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + # run once and fetch the cached plan + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + iterator = iter(cache) + plan = next(iterator)[1].plan + + # run another two FFTs with different sizes so that the first + # plan is discarded from the cache + a = testing.shaped_random((20,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + a = 
testing.shaped_random((30,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + + # check if the first plan is indeed not cached + for _, node in cache: + assert plan is not node.plan + + def test_LRU_cache4(self): + # test if fetching the plan will reorder it to the top + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + # this creates a Plan1d + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + + # this creates a PlanNd + a = testing.shaped_random((10, 20), cupy, cupy.float32) + cupy.fft.fftn(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + + # The first in the cache is the most recently used one; + # using an iterator to access the linked list guarantees that + # we don't alter the cache order + iterator = iter(cache) + assert isinstance(next(iterator)[1].plan, cufft.PlanNd) + assert isinstance(next(iterator)[1].plan, cufft.Plan1d) + with pytest.raises(StopIteration): + next(iterator) + + # this brings Plan1d to the top + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + iterator = iter(cache) + assert isinstance(next(iterator)[1].plan, cufft.Plan1d) + assert isinstance(next(iterator)[1].plan, cufft.PlanNd) + with pytest.raises(StopIteration): + next(iterator) + + # An LRU cache guarantees that such a silly operation never + # raises StopIteration + iterator = iter(cache) + for i in range(100): + cache[next(iterator)[0]] + + @testing.multi_gpu(2) + def test_LRU_cache5(self): + # test if the LRU cache is thread-local + + def init_caches(gpus): + for i in gpus: + with device.Device(i): + config.get_plan_cache() + + # Testing in the current thread: in setUp() we ensure all caches + # are initialized + stdout = intercept_stdout(config.show_plan_cache_info) + assert "uninitialized" not in stdout + + def 
thread_show_plan_cache_info(queue): + # allow output from another thread to be accessed by the + # main thread + cupy.cuda.Device().use() + stdout = intercept_stdout(config.show_plan_cache_info) + queue.put(stdout) + + # When starting a new thread, the cache is uninitialized there + # (for both devices) + q = queue.Queue() + thread = threading.Thread(target=thread_show_plan_cache_info, args=(q,)) + thread.start() + thread.join() + stdout = q.get() + assert stdout.count("uninitialized") == n_devices + + def thread_init_caches(gpus, queue): + cupy.cuda.Device().use() + init_caches(gpus) + thread_show_plan_cache_info(queue) + + # Now let's try initializing device 0 on another thread + thread = threading.Thread( + target=thread_init_caches, + args=( + [0], + q, + ), + ) + thread.start() + thread.join() + stdout = q.get() + assert stdout.count("uninitialized") == n_devices - 1 + + # ...and this time both devices + thread = threading.Thread( + target=thread_init_caches, + args=( + [0, 1], + q, + ), + ) + thread.start() + thread.join() + stdout = q.get() + assert stdout.count("uninitialized") == n_devices - 2 + + @testing.multi_gpu(2) + def test_LRU_cache6(self): + # test if each device has a separate cache + cache0 = self.caches[0] + cache1 = self.caches[1] + + # ensure a fresh state + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # do some computation on GPU 0 + with device.Device(0): + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache0.get_curr_size() == 1 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # do some computation on GPU 1 + with device.Device(1): + c = testing.shaped_random((16,), cupy, cupy.float64) + cupy.fft.fft(c) + assert cache0.get_curr_size() == 1 <= cache0.get_size() + assert cache1.get_curr_size() == 1 <= cache1.get_size() + + # reset device 0 + cache0.clear() + assert cache0.get_curr_size() == 0 <= 
cache0.get_size() + assert cache1.get_curr_size() == 1 <= cache1.get_size() + + # reset device 1 + cache1.clear() + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + @testing.multi_gpu(2) + @pytest.mark.skipif( + runtime.is_hip, reason="hipFFT doesn't support multi-GPU" + ) + def test_LRU_cache7(self): + # test accessing a multi-GPU plan + cache0 = self.caches[0] + cache1 = self.caches[1] + + # ensure a fresh state + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # do some computation on GPU 0 + with device.Device(0): + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache0.get_curr_size() == 1 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # do a multi-GPU FFT + config.use_multi_gpus = True + config.set_cufft_gpus([0, 1]) + c = testing.shaped_random((128,), cupy, cupy.complex64) + cupy.fft.fft(c) + assert cache0.get_curr_size() == 2 <= cache0.get_size() + assert cache1.get_curr_size() == 1 <= cache1.get_size() + + # check both devices' caches see the same multi-GPU plan + plan0 = next(iter(cache0))[1].plan + plan1 = next(iter(cache1))[1].plan + assert plan0 is plan1 + + # reset + config.use_multi_gpus = False + config._device = None + + # do some computation on GPU 1 + with device.Device(1): + e = testing.shaped_random((20,), cupy, cupy.complex128) + cupy.fft.fft(e) + assert cache0.get_curr_size() == 2 <= cache0.get_size() + assert cache1.get_curr_size() == 2 <= cache1.get_size() + + # by this time, the multi-GPU plan remains the most recently + # used one on GPU 0, but not on GPU 1 + assert plan0 is next(iter(cache0))[1].plan + assert plan1 is not next(iter(cache1))[1].plan + + # now use it again to make it the most recent + config.use_multi_gpus = True + config.set_cufft_gpus([0, 1]) + c = testing.shaped_random((128,), cupy, cupy.complex64) + cupy.fft.fft(c) 
+ assert cache0.get_curr_size() == 2 <= cache0.get_size() + assert cache1.get_curr_size() == 2 <= cache1.get_size() + assert plan0 is next(iter(cache0))[1].plan + assert plan1 is next(iter(cache1))[1].plan + # reset + config.use_multi_gpus = False + config._device = None + + # Do 2 more different FFTs on one of the devices, and the + # multi-GPU plan would be discarded from both caches + with device.Device(1): + x = testing.shaped_random((30,), cupy, cupy.complex128) + cupy.fft.fft(x) + y = testing.shaped_random((40, 40), cupy, cupy.complex64) + cupy.fft.fftn(y) + for _, node in cache0: + assert plan0 is not node.plan + for _, node in cache1: + assert plan1 is not node.plan + assert cache0.get_curr_size() == 1 <= cache0.get_size() + assert cache1.get_curr_size() == 2 <= cache1.get_size() + + def test_LRU_cache8(self): + # test if Plan1d and PlanNd can coexist in the same cache + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + # do a 1D FFT + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + assert isinstance(next(iter(cache))[1].plan, cufft.Plan1d) + + # then a 3D FFT + a = testing.shaped_random((8, 8, 8), cupy, cupy.complex128) + cupy.fft.fftn(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + iterator = iter(cache) + + # the cached order is 1. PlanNd, 2. 
Plan1d + assert isinstance(next(iterator)[1].plan, cufft.PlanNd) + assert isinstance(next(iterator)[1].plan, cufft.Plan1d) + + def test_LRU_cache9(self): + # test if memsizes in the cache adds up + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + memsize = 0 + a = testing.shaped_random((10,), cupy, cupy.float32) + cupy.fft.fft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + memsize += next(iter(cache))[1].plan.work_area.mem.size + + a = testing.shaped_random((48,), cupy, cupy.complex64) + cupy.fft.fft(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + memsize += next(iter(cache))[1].plan.work_area.mem.size + + assert memsize == cache.get_curr_memsize() + + def test_LRU_cache10(self): + # test if deletion works and if show_info() is consistent with data + cache = config.get_plan_cache() + assert cache.get_curr_size() == 0 <= cache.get_size() + + curr_size = 0 + size = 2 + curr_memsize = 0 + memsize = "(unlimited)" # default + + a = testing.shaped_random((16, 16), cupy, cupy.float32) + cupy.fft.fft2(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + node1 = next(iter(cache))[1] + curr_size += 1 + curr_memsize += node1.plan.work_area.mem.size + stdout = intercept_stdout(cache.show_info) + assert "{0} / {1} (counts)".format(curr_size, size) in stdout + assert "{0} / {1} (bytes)".format(curr_memsize, memsize) in stdout + assert str(node1) in stdout + + a = testing.shaped_random((1024,), cupy, cupy.complex64) + cupy.fft.ifft(a) + assert cache.get_curr_size() == 2 <= cache.get_size() + node2 = next(iter(cache))[1] + curr_size += 1 + curr_memsize += node2.plan.work_area.mem.size + stdout = intercept_stdout(cache.show_info) + assert "{0} / {1} (counts)".format(curr_size, size) in stdout + assert "{0} / {1} (bytes)".format(curr_memsize, memsize) in stdout + assert str(node2) + "\n" + str(node1) in stdout + + # test deletion + key = node2.key + del cache[key] + assert cache.get_curr_size() == 1 <= 
cache.get_size() + curr_size -= 1 + curr_memsize -= node2.plan.work_area.mem.size + stdout = intercept_stdout(cache.show_info) + assert "{0} / {1} (counts)".format(curr_size, size) in stdout + assert "{0} / {1} (bytes)".format(curr_memsize, memsize) in stdout + assert str(node2) not in stdout + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.multi_gpu(2) + @pytest.mark.skipif( + runtime.is_hip, reason="hipFFT doesn't support multi-GPU" + ) + def test_LRU_cache11(self): + # test if collectively deleting a multi-GPU plan works + _skip_multi_gpu_bug((128,), self.gpus) + cache0 = self.caches[0] + cache1 = self.caches[1] + + # ensure a fresh state + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # do a multi-GPU FFT + c = testing.shaped_random((128,), cupy, cupy.complex64) + cupy.fft.fft(c) + assert cache0.get_curr_size() == 1 <= cache0.get_size() + assert cache1.get_curr_size() == 1 <= cache1.get_size() + + node0 = next(iter(cache0))[1] + node1 = next(iter(cache1))[1] + assert node0.key == node1.key + assert node0.plan is node1.plan + assert cache0.get_curr_memsize() == node0.memsize > 0 + assert cache1.get_curr_memsize() == node1.memsize > 0 + + # delete + del cache0[node0.key] + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + assert cache0.get_curr_memsize() == 0 + assert cache1.get_curr_memsize() == 0 + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.multi_gpu(2) + @pytest.mark.skipif( + runtime.is_hip, reason="hipFFT doesn't support multi-GPU" + ) + def test_LRU_cache12(self): + # test if an error is raise when one of the caches is unable + # to fit it a multi-GPU plan + cache0 = self.caches[0] + cache1 = self.caches[1] + + # ensure a fresh state + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + # make it impossible to cache + 
cache1.set_memsize(1) + + # do a multi-GPU FFT + with pytest.raises(RuntimeError) as e: + c = testing.shaped_random((128,), cupy, cupy.complex64) + cupy.fft.fft(c) + assert "plan memsize is too large for device 1" in str(e.value) + assert cache0.get_curr_size() == 0 <= cache0.get_size() + assert cache1.get_curr_size() == 0 <= cache1.get_size() + + @unittest.skipIf(runtime.is_hip, "rocFFT has different plan sizes") + @unittest.skipIf( + runtime.runtimeGetVersion() >= 11080, + "CUDA 11.8 has different plan size", + ) + def test_LRU_cache13(self): + # test if plan insertion respect the memory size limit + cache = config.get_plan_cache() + cache.set_memsize(1024) + + # ensure a fresh state + assert cache.get_curr_size() == 0 <= cache.get_size() + + # On CUDA 10.0 + sm75, this generates a plan of size 1024 bytes + a = testing.shaped_random((128,), cupy, cupy.complex64) + cupy.fft.ifft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + assert cache.get_curr_memsize() == 1024 == cache.get_memsize() + + # a second plan (of same size) is generated, but the cache is full, + # so the first plan is evicted + a = testing.shaped_random((64,), cupy, cupy.complex128) + cupy.fft.ifft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + assert cache.get_curr_memsize() == 1024 == cache.get_memsize() + plan = next(iter(cache))[1].plan + + # this plan is twice as large, so won't fit in + a = testing.shaped_random((128,), cupy, cupy.complex128) + with pytest.raises(RuntimeError) as e: + cupy.fft.ifft(a) + assert "memsize is too large" in str(e.value) + # the cache remains intact + assert cache.get_curr_size() == 1 <= cache.get_size() + assert cache.get_curr_memsize() == 1024 == cache.get_memsize() + plan1 = next(iter(cache))[1].plan + assert plan1 is plan + + # double the cache size would make the plan just fit (and evict + # the existing one) + cache.set_memsize(2048) + cupy.fft.ifft(a) + assert cache.get_curr_size() == 1 <= cache.get_size() + assert 
cache.get_curr_memsize() == 2048 == cache.get_memsize() + plan2 = next(iter(cache))[1].plan + assert plan2 is not plan diff --git a/dpnp/tests/third_party/cupy/fft_tests/test_callback.py b/dpnp/tests/third_party/cupy/fft_tests/test_callback.py new file mode 100644 index 000000000000..12bfd4aa73df --- /dev/null +++ b/dpnp/tests/third_party/cupy/fft_tests/test_callback.py @@ -0,0 +1,831 @@ +import contextlib +import string +import sys +import tempfile +from unittest import mock + +import numpy as np +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("FFT callbacks are not supported", allow_module_level=True) + + +@contextlib.contextmanager +def use_temporary_cache_dir(): + target = "cupy.fft._callback.get_cache_dir" + with tempfile.TemporaryDirectory() as path: + with mock.patch(target, lambda: path): + yield path + + +_load_callback = r""" +__device__ ${data_type} CB_ConvertInput( + void* dataIn, size_t offset, void* callerInfo, void* sharedPtr) +{ + ${data_type} x = ((${data_type}*)dataIn)[offset]; + ${element} *= 2.5; + return x; +} + +__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput; +""" + +_load_callback_with_aux = r""" +__device__ ${data_type} CB_ConvertInput( + void* dataIn, size_t offset, void* callerInfo, void* sharedPtr) +{ + ${data_type} x = ((${data_type}*)dataIn)[offset]; + ${element} *= *((${aux_type}*)callerInfo); + return x; +} + +__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput; +""" + +_load_callback_with_aux2 = r""" +__device__ ${data_type} CB_ConvertInput( + void* dataIn, size_t offset, void* callerInfo, void* sharedPtr) +{ + ${data_type} x = ((${data_type}*)dataIn)[offset]; + ${element} *= ((${aux_type}*)callerInfo)[offset]; + return x; +} + +__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput; +""" + +_store_callback = r""" +__device__ void CB_ConvertOutput( + void *dataOut, size_t offset, ${data_type} element, + void *callerInfo, void *sharedPointer) +{ + 
${data_type} x = element; + ${element} /= 3.8; + ((${data_type}*)dataOut)[offset] = x; +} + +__device__ ${store_type} d_storeCallbackPtr = CB_ConvertOutput; +""" + +_store_callback_with_aux = r""" +__device__ void CB_ConvertOutput( + void *dataOut, size_t offset, ${data_type} element, + void *callerInfo, void *sharedPointer) +{ + ${data_type} x = element; + ${element} /= *((${aux_type}*)callerInfo); + ((${data_type}*)dataOut)[offset] = x; +} + +__device__ ${store_type} d_storeCallbackPtr = CB_ConvertOutput; +""" + + +def _set_load_cb(code, element, data_type, callback_type, aux_type=None): + return string.Template(code).substitute( + data_type=data_type, + aux_type=aux_type, + load_type=callback_type, + element=element, + ) + + +def _set_store_cb(code, element, data_type, callback_type, aux_type=None): + return string.Template(code).substitute( + data_type=data_type, + aux_type=aux_type, + store_type=callback_type, + element=element, + ) + + +@testing.parameterize( + *testing.product( + { + "n": [None, 5, 10, 15], + "shape": [(10, 7), (10,), (10, 10)], + "norm": [None, "ortho"], + } + ) +) +@testing.with_requires("cython>=0.29.0") +@pytest.mark.skipif( + not sys.platform.startswith("linux"), + reason="callbacks are only supported on Linux", +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="hipFFT does not support callbacks" +) +class Test1dCallbacks: + + def _test_load_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + code = _load_callback + if dtype == np.complex64: + types = ("x.x", "cufftComplex", "cufftCallbackLoadC") + elif dtype == np.complex128: + types = ("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + elif dtype == np.float32: + types = ("x", "cufftReal", "cufftCallbackLoadR") + else: + types = ("x", "cufftDoubleReal", "cufftCallbackLoadD") + cb_load = _set_load_cb(code, *types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, n=self.n, norm=self.norm) + if dtype in 
(np.float32, np.complex64): + if fft_func != "irfft": + out = out.astype(np.complex64) + else: + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks(cb_load=cb_load): + out = fft(a, n=self.n, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_fft_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "fft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_ifft_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "ifft") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_rfft_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "rfft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_irfft_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "irfft") + + def _test_store_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + code = _store_callback + if dtype == np.complex64: + if fft_func != "irfft": + types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + types = ("x", "cufftReal", "cufftCallbackStoreR") + elif dtype == np.complex128: + if fft_func != "irfft": + types = ("x.y", "cufftDoubleComplex", "cufftCallbackStoreZ") + else: + types = ("x", "cufftDoubleReal", "cufftCallbackStoreD") + elif dtype == np.float32: + types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + elif dtype == np.float64: + types = ("x.y", "cufftDoubleComplex", "cufftCallbackStoreZ") + cb_store = _set_store_cb(code, *types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + out = fft(a, n=self.n, norm=self.norm) + if fft_func != "irfft": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = 
out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks(cb_store=cb_store): + out = fft(a, n=self.n, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_fft_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "fft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_ifft_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "ifft") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_rfft_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "rfft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_irfft_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "irfft") + + def _test_load_store_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + load_code = _load_callback + store_code = _store_callback + if fft_func in ("fft", "ifft"): + if dtype == np.complex64: + load_types = ("x.x", "cufftComplex", "cufftCallbackLoadC") + store_types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + load_types = ("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + ) + elif fft_func == "rfft": + if dtype == np.float32: + load_types = ("x", "cufftReal", "cufftCallbackLoadR") + store_types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + load_types = ("x", "cufftDoubleReal", "cufftCallbackLoadD") + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + ) + else: # irfft + if dtype == np.complex64: + load_types = ("x.x", 
"cufftComplex", "cufftCallbackLoadC") + store_types = ("x", "cufftReal", "cufftCallbackStoreR") + else: + load_types = ("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + store_types = ("x", "cufftDoubleReal", "cufftCallbackStoreD") + cb_load = _set_load_cb(load_code, *load_types) + cb_store = _set_store_cb(store_code, *store_types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, n=self.n, norm=self.norm) + if fft_func != "irfft": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks( + cb_load=cb_load, cb_store=cb_store + ): + out = fft(a, n=self.n, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_fft_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "fft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_ifft_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "ifft") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_rfft_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "rfft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_irfft_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "irfft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_fft_load_aux(self, xp, dtype): + fft = xp.fft.fft + c = _load_callback_with_aux2 + if dtype == np.complex64: + cb_load = _set_load_cb( + c, "x.x", 
"cufftComplex", "cufftCallbackLoadC", "float" + ) + else: + cb_load = _set_load_cb( + c, "x.x", "cufftDoubleComplex", "cufftCallbackLoadZ", "double" + ) + + a = testing.shaped_random(self.shape, xp, dtype) + out_last = self.n if self.n is not None else self.shape[-1] + out_shape = list(self.shape) + out_shape[-1] = out_last + last_min = min(self.shape[-1], out_last) + b = xp.arange(np.prod(out_shape), dtype=xp.dtype(dtype).char.lower()) + b = b.reshape(out_shape) + if xp is np: + x = np.zeros(out_shape, dtype=dtype) + x[..., 0:last_min] = a[..., 0:last_min] + x.real *= b + out = fft(x, n=self.n, norm=self.norm) + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks( + cb_load=cb_load, cb_load_aux_arr=b + ): + out = fft(a, n=self.n, norm=self.norm) + + return out + + def _test_load_store_aux_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + load_code = _load_callback_with_aux + store_code = _store_callback_with_aux + if xp is cupy: + load_aux = xp.asarray(2.5, dtype=xp.dtype(dtype).char.lower()) + store_aux = xp.asarray(3.8, dtype=xp.dtype(dtype).char.lower()) + + if fft_func in ("fft", "ifft"): + if dtype == np.complex64: + load_types = ( + "x.x", + "cufftComplex", + "cufftCallbackLoadC", + "float", + ) + store_types = ( + "x.y", + "cufftComplex", + "cufftCallbackStoreC", + "float", + ) + else: + load_types = ( + "x.x", + "cufftDoubleComplex", + "cufftCallbackLoadZ", + "double", + ) + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + "double", + ) + elif fft_func == "rfft": + if dtype == np.float32: + load_types = ("x", "cufftReal", "cufftCallbackLoadR", "float") + store_types = ( + "x.y", + "cufftComplex", + "cufftCallbackStoreC", + "float", + ) + else: + load_types = ( + "x", + "cufftDoubleReal", + "cufftCallbackLoadD", + "double", + ) + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + 
"double", + ) + else: # irfft + if dtype == np.complex64: + load_types = ( + "x.x", + "cufftComplex", + "cufftCallbackLoadC", + "float", + ) + store_types = ("x", "cufftReal", "cufftCallbackStoreR", "float") + else: + load_types = ( + "x.x", + "cufftDoubleComplex", + "cufftCallbackLoadZ", + "double", + ) + store_types = ( + "x", + "cufftDoubleReal", + "cufftCallbackStoreD", + "double", + ) + cb_load = _set_load_cb(load_code, *load_types) + cb_store = _set_store_cb(store_code, *store_types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, n=self.n, norm=self.norm) + if fft_func != "irfft": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks( + cb_load=cb_load, + cb_store=cb_store, + cb_load_aux_arr=load_aux, + cb_store_aux_arr=store_aux, + ): + out = fft(a, n=self.n, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_fft_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "fft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_ifft_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "ifft") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_rfft_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "rfft") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False) + def test_irfft_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "irfft") + + 
+@testing.parameterize( + {"shape": (3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1), "norm": None}, + {"shape": (3, 4), "s": None, "axes": (-2, -1), "norm": None}, + {"shape": (3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1), "norm": None}, + {"shape": (2, 3, 4), "s": None, "axes": None, "norm": "ortho"}, + {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2), "norm": "ortho"}, +) +@testing.with_requires("cython>=0.29.0") +@pytest.mark.skipif( + not sys.platform.startswith("linux"), + reason="callbacks are only supported on Linux", +) +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="hipFFT does not support callbacks" +) +class TestNdCallbacks: + + def _test_load_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + load_code = _load_callback + if dtype == np.complex64: + types = ("x.x", "cufftComplex", "cufftCallbackLoadC") + elif dtype == np.complex128: + types = ("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + elif dtype == np.float32: + types = ("x", "cufftReal", "cufftCallbackLoadR") + else: + types = ("x", "cufftDoubleReal", "cufftCallbackLoadD") + cb_load = _set_load_cb(load_code, *types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + if dtype in (np.float32, np.complex64): + if fft_func != "irfftn": + out = out.astype(np.complex64) + else: + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks(cb_load=cb_load): + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def 
test_fftn_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "fftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifftn_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "ifftn") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfftn_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "rfftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfftn_load(self, xp, dtype): + return self._test_load_helper(xp, dtype, "irfftn") + + def _test_store_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + store_code = _store_callback + if dtype == np.complex64: + if fft_func != "irfftn": + types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + types = ("x", "cufftReal", "cufftCallbackStoreR") + elif dtype == np.complex128: + if fft_func != "irfftn": + types = ("x.y", "cufftDoubleComplex", "cufftCallbackStoreZ") + else: + types = ("x", "cufftDoubleReal", "cufftCallbackStoreD") + elif dtype == np.float32: + types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + elif dtype == np.float64: + types = ("x.y", "cufftDoubleComplex", "cufftCallbackStoreZ") + cb_store = _set_store_cb(store_code, *types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + if fft_func != "irfftn": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks(cb_store=cb_store): + out = fft(a, s=self.s, axes=self.axes, 
norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fftn_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "fftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifftn_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "ifftn") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfftn_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "rfftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfftn_store(self, xp, dtype): + return self._test_store_helper(xp, dtype, "irfftn") + + def _test_load_store_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + load_code = _load_callback + store_code = _store_callback + if fft_func in ("fftn", "ifftn"): + if dtype == np.complex64: + load_types = ("x.x", "cufftComplex", "cufftCallbackLoadC") + store_types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + load_types = ("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + ) + elif fft_func == "rfftn": + if dtype == np.float32: + load_types = ("x", "cufftReal", "cufftCallbackLoadR") + store_types = ("x.y", "cufftComplex", "cufftCallbackStoreC") + else: + load_types = ("x", "cufftDoubleReal", "cufftCallbackLoadD") + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + ) + else: # irfft + if dtype == np.complex64: + load_types = ("x.x", "cufftComplex", "cufftCallbackLoadC") + store_types = ("x", "cufftReal", "cufftCallbackStoreR") + else: + load_types = 
("x.x", "cufftDoubleComplex", "cufftCallbackLoadZ") + store_types = ("x", "cufftDoubleReal", "cufftCallbackStoreD") + cb_load = _set_load_cb(load_code, *load_types) + cb_store = _set_store_cb(store_code, *store_types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + if fft_func != "irfftn": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks( + cb_load=cb_load, cb_store=cb_store + ): + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + + return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fftn_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "fftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifftn_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "ifftn") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfftn_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "rfftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfftn_load_store(self, xp, dtype): + return self._test_load_store_helper(xp, dtype, "irfftn") + + def _test_load_store_aux_helper(self, xp, dtype, fft_func): + fft = getattr(xp.fft, fft_func) + load_code = _load_callback_with_aux + store_code = _store_callback_with_aux + if xp is cupy: + load_aux = xp.asarray(2.5, 
dtype=xp.dtype(dtype).char.lower()) + store_aux = xp.asarray(3.8, dtype=xp.dtype(dtype).char.lower()) + + if fft_func in ("fftn", "ifftn"): + if dtype == np.complex64: + load_types = ( + "x.x", + "cufftComplex", + "cufftCallbackLoadC", + "float", + ) + store_types = ( + "x.y", + "cufftComplex", + "cufftCallbackStoreC", + "float", + ) + else: + load_types = ( + "x.x", + "cufftDoubleComplex", + "cufftCallbackLoadZ", + "double", + ) + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + "double", + ) + elif fft_func == "rfftn": + if dtype == np.float32: + load_types = ("x", "cufftReal", "cufftCallbackLoadR", "float") + store_types = ( + "x.y", + "cufftComplex", + "cufftCallbackStoreC", + "float", + ) + else: + load_types = ( + "x", + "cufftDoubleReal", + "cufftCallbackLoadD", + "double", + ) + store_types = ( + "x.y", + "cufftDoubleComplex", + "cufftCallbackStoreZ", + "double", + ) + else: # irfftn + if dtype == np.complex64: + load_types = ( + "x.x", + "cufftComplex", + "cufftCallbackLoadC", + "float", + ) + store_types = ("x", "cufftReal", "cufftCallbackStoreR", "float") + else: + load_types = ( + "x.x", + "cufftDoubleComplex", + "cufftCallbackLoadZ", + "double", + ) + store_types = ( + "x", + "cufftDoubleReal", + "cufftCallbackStoreD", + "double", + ) + cb_load = _set_load_cb(load_code, *load_types) + cb_store = _set_store_cb(store_code, *store_types) + + a = testing.shaped_random(self.shape, xp, dtype) + if xp is np: + a.real *= 2.5 + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + if fft_func != "irfftn": + out.imag /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.complex64) + else: + out /= 3.8 + if dtype in (np.float32, np.complex64): + out = out.astype(np.float32) + else: + with use_temporary_cache_dir(): + with xp.fft.config.set_cufft_callbacks( + cb_load=cb_load, + cb_store=cb_store, + cb_load_aux_arr=load_aux, + cb_store_aux_arr=store_aux, + ): + out = fft(a, s=self.s, axes=self.axes, norm=self.norm) + + 
return out + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fftn_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "fftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifftn_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "ifftn") + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfftn_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "rfftn") + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-4, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfftn_load_store_aux(self, xp, dtype): + return self._test_load_store_aux_helper(xp, dtype, "irfftn") diff --git a/dpnp/tests/third_party/cupy/fft_tests/test_fft.py b/dpnp/tests/third_party/cupy/fft_tests/test_fft.py index 918b6e2a23f8..f4bc6533cb6f 100644 --- a/dpnp/tests/third_party/cupy/fft_tests/test_fft.py +++ b/dpnp/tests/third_party/cupy/fft_tests/test_fft.py @@ -1,4 +1,5 @@ import functools +import warnings import numpy as np import pytest @@ -6,6 +7,7 @@ import dpnp as cupy from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing +from dpnp.tests.third_party.cupy.testing._loops import _wraps_partial @pytest.fixture @@ -15,20 +17,102 @@ def skip_forward_backward(request): pytest.skip("forward/backward is supported by NumPy 1.20+") +def nd_planning_states(states=[True, False], name="enable_nd"): + """Decorator for parameterized tests with and without nd planning + + Tests are repeated with config.enable_nd_planning set to True and False + + Args: + states(list of bool): The boolean cases to test. 
+ name(str): Argument name to which specified dtypes are passed. + + This decorator adds a keyword argument specified by ``name`` + to the test fixture. Then, it runs the fixtures in parallel + by passing the each element of ``dtypes`` to the named + argument. + """ + + def decorator(impl): + @_wraps_partial(impl, name) + def test_func(self, *args, **kw): + # get original global planning state + # planning_state = config.enable_nd_planning + try: + for nd_planning in states: + try: + # enable or disable nd planning + # config.enable_nd_planning = nd_planning + + kw[name] = nd_planning + impl(self, *args, **kw) + except Exception: + print(name, "is", nd_planning) + raise + finally: + # restore original global planning state + # config.enable_nd_planning = planning_state + pass + + return test_func + + return decorator + + +def multi_gpu_config(gpu_configs=None): + """Decorator for parameterized tests with different GPU configurations. + + Args: + gpu_configs (list of list): The GPUs to test. + + .. notes: + The decorated tests are skipped if no or only one GPU is available. 
+ """ + + def decorator(impl): + @functools.wraps(impl) + def test_func(self, *args, **kw): + use_multi_gpus = config.use_multi_gpus + _devices = config._devices + + try: + for gpus in gpu_configs: + try: + nGPUs = len(gpus) + assert nGPUs >= 2, "Must use at least two gpus" + config.use_multi_gpus = True + config.set_cufft_gpus(gpus) + self.gpus = gpus + + impl(self, *args, **kw) + except Exception: + print("GPU config is:", gpus) + raise + finally: + config.use_multi_gpus = use_multi_gpus + config._devices = _devices + del self.gpus + + return test_func + + return decorator + + +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *testing.product( { "n": [None, 0, 5, 10, 15], "shape": [(0,), (10, 0), (10,), (10, 10)], - "norm": [None, "backward", "ortho", "forward", ""], + "norm": [None, "backward", "ortho", "forward"], } ) ) class TestFft: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, @@ -36,17 +120,11 @@ class TestFft: ) def test_fft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) - out = xp.fft.fft(a, n=self.n, norm=self.norm) - - # np.fft.fft always returns np.complex128 - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.fft(a, n=self.n, norm=self.norm) @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, @@ -57,14 +135,10 @@ def test_fft(self, xp, dtype): @testing.with_requires("numpy!=1.17.1") def test_ifft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) - out = xp.fft.ifft(a, n=self.n, norm=self.norm) - - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.ifft(a, n=self.n, norm=self.norm) +# 
@testing.with_requires("numpy>=2.0") @testing.parameterize( *testing.product( { @@ -75,10 +149,11 @@ def test_ifft(self, xp, dtype): ) ) class TestFftOrder: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, - atol=1e-6, + rtol=1e-3, + atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), @@ -87,17 +162,11 @@ def test_fft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) if self.data_order == "F": a = xp.asfortranarray(a) - out = xp.fft.fft(a, axis=self.axis) - - # np.fft.fft always returns np.complex128 - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.fft(a, axis=self.axis) @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, @@ -107,123 +176,308 @@ def test_ifft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) if self.data_order == "F": a = xp.asfortranarray(a) - out = xp.fft.ifft(a, axis=self.axis) + return xp.fft.ifft(a, axis=self.axis) + + +# See #3757 and NVIDIA internal ticket 3093094 +def _skip_multi_gpu_bug(shape, gpus): + # avoid CUDA 11.0 (will be fixed by CUDA 11.2) bug triggered by + # - batch = 1 + # - gpus = [1, 0] + if ( + 11000 <= cupy.cuda.runtime.runtimeGetVersion() < 11020 + and len(shape) == 1 + and gpus == [1, 0] + ): + pytest.skip("avoid CUDA 11 bug") + + +# Almost identical to the TestFft class, except that +# 1. multi-GPU cuFFT is used +# 2. 
the tested parameter combinations are adjusted to meet the requirements +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *testing.product( + { + "n": [None, 0, 64], + "shape": [(0,), (0, 10), (64,), (4, 64)], + "norm": [None, "backward", "ortho", "forward"], + } + ) +) +@pytest.mark.skip("multi GPU is not supported") +@testing.multi_gpu(2) +class TestMultiGpuFft: - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) - return out + a = testing.shaped_random(self.shape, xp, dtype) + return xp.fft.fft(a, n=self.n, norm=self.norm) + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + # NumPy 1.17.0 and 1.17.1 raises ZeroDivisonError due to a bug + @testing.with_requires("numpy!=1.17.0") + @testing.with_requires("numpy!=1.17.1") + def test_ifft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) + + a = testing.shaped_random(self.shape, xp, dtype) + return xp.fft.ifft(a, n=self.n, norm=self.norm) + + +# Almost identical to the TestFftOrder class, except that +# 1. multi-GPU cuFFT is used +# 2. 
the tested parameter combinations are adjusted to meet the requirements +@testing.with_requires("numpy>=2.0") +@testing.parameterize( + *testing.product( + { + "shape": [(10, 10), (10, 5, 10)], + "data_order": ["F", "C"], + "axis": [0, 1, -1], + } + ) +) +@pytest.mark.skip("multi GPU is not supported") +@testing.multi_gpu(2) +class TestMultiGpuFftOrder: + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) + + a = testing.shaped_random(self.shape, xp, dtype) + if self.data_order == "F": + a = xp.asfortranarray(a) + return xp.fft.fft(a, axis=self.axis) + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) + a = testing.shaped_random(self.shape, xp, dtype) + if self.data_order == "F": + a = xp.asfortranarray(a) + return xp.fft.ifft(a, axis=self.axis) + + +@pytest.mark.skip("default FFT function is not supported") +@testing.with_requires("numpy>=2.0") +class TestDefaultPlanType: + + @nd_planning_states() + def test_default_fft_func(self, enable_nd): + # test cases where nd cuFFT plan is possible + ca = cupy.ones((16, 16, 16)) + for axes in [(0, 1), (1, 2), None, (0, 1, 2)]: + fft_func = _default_fft_func(ca, axes=axes) + if enable_nd: + # TODO(leofang): test newer ROCm versions + if axes == (0, 1) and cupy.cuda.runtime.is_hip: + assert fft_func is _fft + else: + assert fft_func is _fftn + else: + assert fft_func is _fft + + # only a single axis is transformed -> 1d plan preferred + for axes in [(0,), (1,), (2,)]: + assert _default_fft_func(ca, axes=axes) is _fft + + # non-contiguous axes -> nd plan not possible + assert _default_fft_func(ca, 
axes=(0, 2)) is _fft + + # >3 axes transformed -> nd plan not possible + ca = cupy.ones((2, 4, 6, 8)) + assert _default_fft_func(ca) is _fft + + # first or last axis not included -> nd plan not possible + assert _default_fft_func(ca, axes=(1,)) is _fft + + # for rfftn + ca = cupy.random.random((4, 2, 6)) + for s, axes in zip([(3, 4), None, (8, 7, 5)], [(-2, -1), (0, 1), None]): + fft_func = _default_fft_func(ca, s=s, axes=axes, value_type="R2C") + if enable_nd: + # TODO(leofang): test newer ROCm versions + if axes == (0, 1) and cupy.cuda.runtime.is_hip: + assert fft_func is _fft + else: + assert fft_func is _fftn + else: + assert fft_func is _fft + + # nd plan not possible if last axis is not 0 or ndim-1 + assert _default_fft_func(ca, axes=(2, 1), value_type="R2C") is _fft + + # for irfftn + ca = cupy.random.random((4, 2, 6)).astype(cupy.complex128) + for s, axes in zip([(3, 4), None, (8, 7, 5)], [(-2, -1), (0, 1), None]): + fft_func = _default_fft_func(ca, s=s, axes=axes, value_type="C2R") + if enable_nd: + # To get around hipFFT's bug, we don't use PlanNd for C2R + # TODO(leofang): test newer ROCm versions + if cupy.cuda.runtime.is_hip: + assert fft_func is _fft + else: + assert fft_func is _fftn + else: + assert fft_func is _fft + + # nd plan not possible if last axis is not 0 or ndim-1 + assert _default_fft_func(ca, axes=(2, 1), value_type="C2R") is _fft + + +@pytest.mark.skip("memory management is not supported") +@testing.with_requires("numpy>=2.0") +@testing.slow +class TestFftAllocate: + + def test_fft_allocate(self): + # Check CuFFTError is not raised when the GPU memory is enough. + # See https://github.com/cupy/cupy/issues/1063 + # TODO(mizuno): Simplify "a" after memory compaction is implemented. 
+ a = [] + for i in range(10): + a.append(cupy.empty(100000000)) + del a + b = cupy.empty(100000007, dtype=cupy.float32) + cupy.fft.fft(b) + # Free huge memory for slow test + del b + cupy.get_default_memory_pool().free_all_blocks() + # Clean up FFT plan cache + cupy.fft.config.clear_plan_cache() + + +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *( testing.product_dict( [ - # some of the following cases are modified, since in NumPy 2.0.0 - # `s` must contain only integer `s`, not None values, and - # If `s` is not None, `axes` must not be None either. {"shape": (3, 4), "s": None, "axes": None}, - {"shape": (3, 4), "s": (1, 4), "axes": (0, 1)}, - {"shape": (3, 4), "s": (1, 5), "axes": (0, 1)}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-1, -2)}, # {"shape": (3, 4), "s": None, "axes": (0,)}, # mkl_fft gh-109 + {"shape": (3, 4), "s": None, "axes": None}, # {"shape": (3, 4), "s": None, "axes": ()}, # mkl_fft gh-108 {"shape": (2, 3, 4), "s": None, "axes": None}, - {"shape": (2, 3, 4), "s": (1, 4, 4), "axes": (0, 1, 2)}, - {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (0, 1, 2)}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-2, -1)}, {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, # {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, # mkl_fft gh-109 + {"shape": (2, 3, 4), "s": None, "axes": None}, # {"shape": (2, 3, 4), "s": None, "axes": ()}, # mkl_fft gh-108 # {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, # mkl_fft gh-109 {"shape": (2, 3, 4, 5), "s": None, "axes": None}, # {"shape": (0, 5), "s": None, "axes": None}, # mkl_fft gh-110 # {"shape": (2, 0, 5), "s": None, "axes": None}, # mkl_fft gh-110 # {"shape": (0, 0, 5), "s": None, "axes": None}, # mkl_fft gh-110 - {"shape": (3, 4), "s": (0, 5), "axes": (0, 1)}, - {"shape": (3, 4), "s": (1, 
0), "axes": (0, 1)}, + {"shape": (3, 4), "s": (0, 5), "axes": (-2, -1)}, + {"shape": (3, 4), "s": (1, 0), "axes": (-2, -1)}, ], - testing.product( - {"norm": [None, "backward", "ortho", "forward", ""]} - ), + testing.product({"norm": [None, "backward", "ortho", "forward"]}), ) ) ) class TestFft2: + + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_fft2(self, xp, dtype, order): + def test_fft2(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm) + + with warnings.catch_warnings(): + # axis=None and s != None, NumPy 2.0 + warnings.simplefilter("ignore", DeprecationWarning) + out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm) if self.axes is not None and not self.axes: assert out is a return out - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - return out + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_ifft2(self, xp, dtype, order): + def test_ifft2(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm) + + with warnings.catch_warnings(): + # axis=None and s != None, NumPy 2.0 + warnings.simplefilter("ignore", DeprecationWarning) + out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm) if self.axes is not None and not self.axes: assert out is a return 
out - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - return out +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *( testing.product_dict( [ - # some of the following cases are modified, since in NumPy 2.0.0 - # `s` must contain only integer `s`, not None values, and - # If `s` is not None, `axes` must not be None either. {"shape": (3, 4), "s": None, "axes": None}, - {"shape": (3, 4), "s": (1, 4), "axes": (0, 1)}, - {"shape": (3, 4), "s": (1, 5), "axes": (0, 1)}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-1, -2)}, {"shape": (3, 4), "s": None, "axes": [-1, -2]}, # {"shape": (3, 4), "s": None, "axes": (0,)}, # mkl_fft gh-109 # {"shape": (3, 4), "s": None, "axes": ()}, # mkl_fft gh-108 + {"shape": (3, 4), "s": None, "axes": None}, {"shape": (2, 3, 4), "s": None, "axes": None}, - {"shape": (2, 3, 4), "s": (1, 4, 4), "axes": (0, 1, 2)}, - {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (0, 1, 2)}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, # {"shape": (2, 3, 4), "s": None, "axes": (-1, -3)}, # mkl_fft gh-109 # {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, # mkl_fft gh-109 + {"shape": (2, 3, 4), "s": None, "axes": None}, # {"shape": (2, 3, 4), "s": None, "axes": ()}, # mkl_fft gh-108 # {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, # mkl_fft gh-109 {"shape": (2, 3, 4), "s": (4, 3, 2), "axes": (2, 0, 1)}, @@ -232,62 +486,402 @@ def test_ifft2(self, xp, dtype, order): # {"shape": (2, 0, 5), "s": None, "axes": None}, # mkl_fft gh-110 # {"shape": (0, 0, 5), "s": None, "axes": None}, # mkl_fft gh-110 ], - testing.product( - {"norm": [None, "backward", "ortho", "forward", ""]} - ), + testing.product({"norm": [None, "backward", 
"ortho", "forward"]}), ) ) ) class TestFftn: + + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_fftn(self, xp, dtype, order): + def test_fftn(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm) + + with warnings.catch_warnings(): + # axis=None and s != None, NumPy 2.0 + warnings.simplefilter("ignore", DeprecationWarning) + out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm) if self.axes is not None and not self.axes: assert out is a return out - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - return out + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_ifftn(self, xp, dtype, order): + def test_ifftn(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm) + + with warnings.catch_warnings(): + # axis=None and s != None, NumPy 2.0 + warnings.simplefilter("ignore", DeprecationWarning) + out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm) if self.axes is not None and not self.axes: assert out is a return out - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - return out +@pytest.mark.skip("get_fft_plan() is not supported") +@testing.with_requires("numpy>=2.0") 
+@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *( + testing.product_dict( + [ + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (-1, -2)}, + {"shape": (3, 4), "s": None, "axes": (0,)}, + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": (2, 3), "axes": (-2, -1)}, + {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, + {"shape": (0, 5), "s": None, "axes": None}, + {"shape": (2, 0, 5), "s": None, "axes": None}, + {"shape": (0, 0, 5), "s": None, "axes": None}, + ], + testing.product({"norm": [None, "backward", "ortho", "forward"]}), + ) + ) +) +class TestPlanCtxManagerFftn: + + @pytest.fixture(autouse=True) + def skip_buggy(self): + if cupy.cuda.runtime.is_hip: + # TODO(leofang): test newer ROCm versions + if self.axes == (0, 1) and self.shape == (2, 3, 4): + pytest.skip( + "hipFFT's PlanNd for this case " + "is buggy, so Plan1d is generated " + "instead" + ) + + @nd_planning_states() + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fftn(self, xp, dtype, enable_nd): + assert config.enable_nd_planning == enable_nd + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + plan = get_fft_plan(a, self.s, self.axes) + with plan: + return xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm) + + @nd_planning_states() + 
@testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifftn(self, xp, dtype, enable_nd): + assert config.enable_nd_planning == enable_nd + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + plan = get_fft_plan(a, self.s, self.axes) + with plan: + return xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm) + + @nd_planning_states() + @testing.for_complex_dtypes() + def test_fftn_error_on_wrong_plan(self, dtype, enable_nd): + if 0 in self.shape: + pytest.skip("0 in shape") + # This test ensures the context manager plan is picked up + + from cupy.fft import fftn + from cupyx.scipy.fftpack import get_fft_plan + + assert config.enable_nd_planning == enable_nd + + # can't get a plan, so skip + if self.axes is not None: + if self.s is not None: + if len(self.s) != len(self.axes): + return + elif len(self.shape) != len(self.axes): + return + + a = testing.shaped_random(self.shape, cupy, dtype) + bad_in_shape = tuple(2 * i for i in self.shape) + if self.s is None: + bad_out_shape = bad_in_shape + else: + bad_out_shape = tuple(2 * i for i in self.s) + b = testing.shaped_random(bad_in_shape, cupy, dtype) + plan_wrong = get_fft_plan(b, bad_out_shape, self.axes) + + with pytest.raises(ValueError) as ex, plan_wrong: + fftn(a, s=self.s, axes=self.axes, norm=self.norm) + # targeting a particular error + assert "The cuFFT plan and a.shape do not match" in str(ex.value) + + +@pytest.mark.skip("get_fft_plan() is not supported") +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *testing.product( + { + "n": [None, 5, 10, 15], + "shape": [ + (10,), + ], + "norm": [None, "backward", "ortho", "forward"], + } + ) +) +class TestPlanCtxManagerFft: + + @testing.for_complex_dtypes() + 
@testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.fft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape) + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.fft(a, n=self.n, norm=self.norm) + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.ifft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape) + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.ifft(a, n=self.n, norm=self.norm) + + @testing.for_complex_dtypes() + def test_fft_error_on_wrong_plan(self, dtype): + # This test ensures the context manager plan is picked up + + from cupy.fft import fft + from cupyx.scipy.fftpack import get_fft_plan + + a = testing.shaped_random(self.shape, cupy, dtype) + bad_shape = tuple(5 * i for i in self.shape) + b = testing.shaped_random(bad_shape, cupy, dtype) + plan_wrong = get_fft_plan(b) + assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d) + + with pytest.raises(ValueError) as ex, plan_wrong: + fft(a, n=self.n, norm=self.norm) + # targeting a particular error + assert "Target array size does not match the plan." in str(ex.value) + + +# Almost identical to the TestPlanCtxManagerFft class, except that +# 1. multi-GPU cuFFT is used +# 2. 
the tested parameter combinations are adjusted to meet the requirements +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *testing.product( + { + "n": [None, 64], + "shape": [(64,), (128,)], + "norm": [None, "backward", "ortho", "forward", ""], + } + ) +) +@pytest.mark.skip("get_fft_plan() is not supported") +@testing.multi_gpu(2) +class TestMultiGpuPlanCtxManagerFft: + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_fft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) + + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.fft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape) + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.fft(a, n=self.n, norm=self.norm) + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_ifft(self, xp, dtype): + _skip_multi_gpu_bug(self.shape, self.gpus) + + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.ifft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape) + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.ifft(a, n=self.n, norm=self.norm) + + @multi_gpu_config(gpu_configs=[[0, 1], [1, 0]]) + @testing.for_complex_dtypes() + def test_fft_error_on_wrong_plan(self, dtype): + # This test ensures the context manager plan is picked up + + from cupy.fft import fft + from cupyx.scipy.fftpack import get_fft_plan + + a = 
testing.shaped_random(self.shape, cupy, dtype) + bad_shape = tuple(4 * i for i in self.shape) + b = testing.shaped_random(bad_shape, cupy, dtype) + plan_wrong = get_fft_plan(b) + assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d) + + with pytest.raises(ValueError) as ex, plan_wrong: + fft(a, n=self.n, norm=self.norm) + # targeting a particular error + if self.norm == "": + # if norm is invalid, we still get ValueError, but it's raised + # when checking norm, earlier than the plan check + return # skip + assert "Target array size does not match the plan." in str(ex.value) + + +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *( + testing.product_dict( + [ + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (3, 4), "s": None, "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (-1, -2)}, + {"shape": (3, 4), "s": None, "axes": (0,)}, + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": (1, 4, None), "axes": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": None}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4, 5), "s": None, "axes": (-3, -2, -1)}, + ], + testing.product( + {"norm": [None, "backward", "ortho", "forward", ""]} + ), + ) + ) +) +@pytest.mark.skip("default FFT function is not supported") +class TestFftnContiguity: + + @nd_planning_states([True]) + @testing.for_all_dtypes() + def test_fftn_orders(self, dtype, enable_nd): + for order in ["C", "F"]: + a = testing.shaped_random(self.shape, cupy, dtype) + if order == "F": + a = cupy.asfortranarray(a) + out = cupy.fft.fftn(a, s=self.s, axes=self.axes) + + fft_func = _default_fft_func(a, s=self.s, axes=self.axes) + if fft_func is _fftn: + # nd plans have output with contiguity matching the input + assert 
out.flags.c_contiguous == a.flags.c_contiguous + assert out.flags.f_contiguous == a.flags.f_contiguous + else: + # 1d planning case doesn't guarantee preserved contiguity + pass + + @nd_planning_states([True]) + @testing.for_all_dtypes() + def test_ifftn_orders(self, dtype, enable_nd): + for order in ["C", "F"]: + + a = testing.shaped_random(self.shape, cupy, dtype) + if order == "F": + a = cupy.asfortranarray(a) + out = cupy.fft.ifftn(a, s=self.s, axes=self.axes) + + fft_func = _default_fft_func(a, s=self.s, axes=self.axes) + if fft_func is _fftn: + # nd plans have output with contiguity matching the input + assert out.flags.c_contiguous == a.flags.c_contiguous + assert out.flags.f_contiguous == a.flags.f_contiguous + else: + # 1d planning case doesn't guarantee preserved contiguity + pass + + +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *testing.product( @@ -299,9 +893,10 @@ def test_ifftn(self, xp, dtype, order): ) ) class TestRfft: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, @@ -309,16 +904,11 @@ class TestRfft: ) def test_rfft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) - out = xp.fft.rfft(a, n=self.n, norm=self.norm) - - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.rfft(a, n=self.n, norm=self.norm) @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=2e-6, accept_error=ValueError, contiguous_check=False, @@ -328,32 +918,106 @@ def test_irfft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.irfft(a, n=self.n, norm=self.norm) - if xp is np and dtype in [np.float16, np.float32, np.complex64]: + if dtype == xp.float16 and xp is cupy: + # XXX: np2.0: f16 dtypes differ + out = out.astype(np.float16) + elif ( + xp is 
np + and np.lib.NumpyVersion(np.__version__) < "2.0.0" + and dtype == np.float32 + ): out = out.astype(np.float32) return out +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *testing.product( + { + "n": [None, 5, 10, 15], + "shape": [(10,)], + "norm": [None, "backward", "ortho", "forward"], + } + ) +) +@pytest.mark.skip("get_fft_plan() is not supported") +class TestPlanCtxManagerRfft: + + @testing.for_all_dtypes(no_complex=True) + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.rfft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape, value_type="R2C") + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.rfft(a, n=self.n, norm=self.norm) + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfft(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.irfft(a, n=self.n, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + shape = (self.n,) if self.n is not None else None + plan = get_fft_plan(a, shape=shape, value_type="C2R") + assert isinstance(plan, cupy.cuda.cufft.Plan1d) + with plan: + return xp.fft.irfft(a, n=self.n, norm=self.norm) + + @testing.for_all_dtypes(no_complex=True) + def test_rfft_error_on_wrong_plan(self, dtype): + # This test ensures the context manager plan is picked up + + from cupy.fft import rfft + from cupyx.scipy.fftpack import get_fft_plan + + a = testing.shaped_random(self.shape, cupy, dtype) + bad_shape = tuple(5 * i for i in self.shape) + b = testing.shaped_random(bad_shape, cupy, dtype) + plan_wrong 
= get_fft_plan(b, value_type="R2C") + assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d) + + with pytest.raises(ValueError) as ex, plan_wrong: + rfft(a, n=self.n, norm=self.norm) + # targeting a particular error + assert "Target array size does not match the plan." in str(ex.value) + + +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *( testing.product_dict( [ - # some of the following cases are modified, since in NumPy 2.0.0 - # `s` must contain only integer `s`, not None values, and - # If `s` is not None, `axes` must not be None either. {"shape": (3, 4), "s": None, "axes": None}, - {"shape": (3, 4), "s": (1, 4), "axes": (0, 1)}, - {"shape": (3, 4), "s": (1, 5), "axes": (0, 1)}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-1, -2)}, {"shape": (3, 4), "s": None, "axes": (0,)}, + {"shape": (3, 4), "s": None, "axes": None}, # {"shape": (2, 3, 4), "s": None, "axes": None}, # mkl_fft gh-116 - # {"shape": (2, 3, 4), "s": (1, 4, 4), "axes": (0, 1, 2)}, # mkl_fft gh-115 - # {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (0, 1, 2)}, # mkl_fft gh-115 + # {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, # mkl_fft gh-115 # {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, # mkl_fft gh-116 # {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, # mkl_fft gh-116 {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, # {"shape": (2, 3, 4, 5), "s": None, "axes": None}, # mkl_fft gh-109 and gh-116 ], @@ -364,54 +1028,60 @@ def test_irfft(self, xp, dtype): ) ) class TestRfft2: + + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, 
type_check=has_support_aspect64(), ) - def test_rfft2(self, xp, dtype, order): + def test_rfft2(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm) - - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm) + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_irfft2(self, xp, dtype, order): + def test_irfft2(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd + if self.s is None and self.axes in [None, (-2, -1)]: pytest.skip("Input is not Hermitian Symmetric") + elif dtype == xp.float16 and xp is cupy: + pytest.xfail("XXX: np2.0: f16 dtypes differ") + elif ( + np.lib.NumpyVersion(np.__version__) < "2.0.0" + and dtype == np.float32 + ): + pytest.skip("dtypes differ") + a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm) - - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.float32) - - return out + return xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm) +# @testing.with_requires("numpy>=2.0") @testing.parameterize( {"shape": (3, 4), "s": None, "axes": (), "norm": None}, {"shape": (2, 3, 4), "s": None, "axes": (), "norm": None}, ) class TestRfft2EmptyAxes: + @testing.for_all_dtypes(no_complex=True) def test_rfft2(self, dtype): for xp in (np, cupy): @@ -427,26 +1097,24 @@ def test_irfft2(self, dtype): xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm) +# 
@testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *( testing.product_dict( [ - # some of the following cases are modified, since in NumPy 2.0.0 - # `s` must contain only integer `s`, not None values, and - # If `s` is not None, `axes` must not be None either. {"shape": (3, 4), "s": None, "axes": None}, - {"shape": (3, 4), "s": (1, 4), "axes": (0, 1)}, - {"shape": (3, 4), "s": (1, 5), "axes": (0, 1)}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-2, -1)}, {"shape": (3, 4), "s": None, "axes": (-1, -2)}, {"shape": (3, 4), "s": None, "axes": (0,)}, - # {"shape": (2, 3, 4), "s": None, "axes": None}, # mkl_fft gh-116 - # {"shape": (2, 3, 4), "s": (1, 4, 4), "axes": (0, 1, 2)}, # mkl_fft gh-115 - # {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (0, 1, 2)}, # mkl_fft gh-115 + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + # {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, # mkl_fft gh-115 # {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, # mkl_fft gh-116 # {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, # mkl_fft gh-116 {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, {"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, # {"shape": (2, 3, 4, 5), "s": None, "axes": None}, # mkl_fft gh-109 and gh-116 ], @@ -457,54 +1125,207 @@ def test_irfft2(self, dtype): ) ) class TestRfftn: + + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_rfftn(self, xp, dtype, order): + def test_rfftn(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - 
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm) - - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.complex64) - - return out + return xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm) + @nd_planning_states() @testing.for_orders("CF") @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, type_check=has_support_aspect64(), ) - def test_irfftn(self, xp, dtype, order): + def test_irfftn(self, xp, dtype, order, enable_nd): + # assert config.enable_nd_planning == enable_nd + if self.s is None and self.axes in [None, (-2, -1)]: pytest.skip("Input is not Hermitian Symmetric") + elif dtype == xp.float16 and xp is cupy: + pytest.xfail("XXX: np2.0: f16 dtypes differ") + elif ( + np.lib.NumpyVersion(np.__version__) < "2.0.0" + and dtype == np.float32 + ): + pytest.skip("dtypes differ") + a = testing.shaped_random(self.shape, xp, dtype) if order == "F": a = xp.asfortranarray(a) - out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) + return xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) - if xp is np and dtype in [np.float16, np.float32, np.complex64]: - out = out.astype(np.float32) - return out +# Only those tests in which a legit plan can be obtained are kept +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *( + testing.product_dict( + [ + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (3, 4), "s": (1, 5), "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (0,)}, + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + 
{"shape": (2, 3, 4), "s": (2, 3), "axes": (0, 1, 2)}, + ], + testing.product({"norm": [None, "backward", "ortho", "forward"]}), + ) + ) +) +@pytest.mark.skip("get_fft_plan() is not supported") +class TestPlanCtxManagerRfftn: + + @pytest.fixture(autouse=True) + def skip_buggy(self): + if cupy.cuda.runtime.is_hip: + # TODO(leofang): test newer ROCm versions + if self.axes == (0, 1) and self.shape == (2, 3, 4): + pytest.skip( + "hipFFT's PlanNd for this case " + "is buggy, so Plan1d is generated " + "instead" + ) + + @nd_planning_states() + @testing.for_all_dtypes(no_complex=True) + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_rfftn(self, xp, dtype, enable_nd): + assert config.enable_nd_planning == enable_nd + a = testing.shaped_random(self.shape, xp, dtype) + + if xp is np: + return xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + plan = get_fft_plan(a, self.s, self.axes, value_type="R2C") + with plan: + return xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm) + @nd_planning_states() + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose( + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False + ) + def test_irfftn(self, xp, dtype, enable_nd): + assert config.enable_nd_planning == enable_nd + a = testing.shaped_random(self.shape, xp, dtype) + + if dtype == xp.float16 and xp is cupy: + pytest.xfail("XXX: np2.0: f16 dtypes differ") + + if xp is np: + return xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) + + from cupyx.scipy.fftpack import get_fft_plan + + plan = get_fft_plan(a, self.s, self.axes, value_type="C2R") + with plan: + return xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) + + # TODO(leofang): write test_rfftn_error_on_wrong_plan()? 
+ + +@testing.with_requires("numpy>=2.0") +@pytest.mark.usefixtures("skip_forward_backward") +@testing.parameterize( + *( + testing.product_dict( + [ + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (3, 4), "s": None, "axes": (-2, -1)}, + {"shape": (3, 4), "s": None, "axes": (-1, -2)}, + {"shape": (3, 4), "s": None, "axes": (0,)}, + {"shape": (3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4), "s": (1, 4, 10), "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-3, -2, -1)}, + {"shape": (2, 3, 4), "s": None, "axes": (-1, -2, -3)}, + {"shape": (2, 3, 4), "s": None, "axes": (0, 1)}, + {"shape": (2, 3, 4), "s": None, "axes": None}, + {"shape": (2, 3, 4, 5), "s": None, "axes": None}, + ], + testing.product({"norm": [None, "backward", "ortho", "forward"]}), + ) + ) +) +@pytest.mark.skip("default FFT function is not supported") +class TestRfftnContiguity: + + @nd_planning_states([True]) + @testing.for_float_dtypes() + def test_rfftn_orders(self, dtype, enable_nd): + for order in ["C", "F"]: + a = testing.shaped_random(self.shape, cupy, dtype) + if order == "F": + a = cupy.asfortranarray(a) + out = cupy.fft.rfftn(a, s=self.s, axes=self.axes) + + fft_func = _default_fft_func( + a, s=self.s, axes=self.axes, value_type="R2C" + ) + if fft_func is _fftn: + # nd plans have output with contiguity matching the input + assert out.flags.c_contiguous == a.flags.c_contiguous + assert out.flags.f_contiguous == a.flags.f_contiguous + else: + # 1d planning case doesn't guarantee preserved contiguity + pass + + @nd_planning_states([True]) + @testing.for_all_dtypes() + def test_ifftn_orders(self, dtype, enable_nd): + for order in ["C", "F"]: + + a = testing.shaped_random(self.shape, cupy, dtype) + if order == "F": + a = cupy.asfortranarray(a) + out = cupy.fft.irfftn(a, s=self.s, axes=self.axes) + + fft_func = _default_fft_func( + a, s=self.s, axes=self.axes, value_type="C2R" + ) + if fft_func is _fftn: + # nd 
plans have output with contiguity matching the input + assert out.flags.c_contiguous == a.flags.c_contiguous + assert out.flags.f_contiguous == a.flags.f_contiguous + else: + # 1d planning case doesn't guarantee preserved contiguity + pass + + +# @testing.with_requires("numpy>=2.0") @testing.parameterize( {"shape": (3, 4), "s": None, "axes": (), "norm": None}, {"shape": (2, 3, 4), "s": None, "axes": (), "norm": None}, ) class TestRfftnEmptyAxes: + @testing.for_all_dtypes(no_complex=True) def test_rfftn(self, dtype): for xp in (np, cupy): @@ -520,20 +1341,22 @@ def test_irfftn(self, dtype): xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm) +# @testing.with_requires("numpy>=2.0") @pytest.mark.usefixtures("skip_forward_backward") @testing.parameterize( *testing.product( { "n": [None, 5, 10, 15], "shape": [(10,), (10, 10)], - "norm": [None, "backward", "ortho", "forward", ""], + "norm": [None, "backward", "ortho", "forward"], } ) ) class TestHfft: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=2e-6, accept_error=ValueError, contiguous_check=False, @@ -543,14 +1366,21 @@ def test_hfft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.hfft(a, n=self.n, norm=self.norm) - if xp is np and dtype in [np.float16, np.float32, np.complex64]: + if dtype == xp.float16 and xp is cupy: + # XXX: np2.0: f16 dtypes differ + out = out.astype(np.float16) + elif ( + xp is np + and np.lib.NumpyVersion(np.__version__) < "2.0.0" + and dtype == np.float32 + ): out = out.astype(np.float32) return out @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, accept_error=ValueError, contiguous_check=False, @@ -560,39 +1390,46 @@ def test_ihfft(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) out = xp.fft.ihfft(a, n=self.n, norm=self.norm) - if xp is np and dtype in [np.float16, np.float32, np.complex64]: + if ( + xp is np + and 
np.lib.NumpyVersion(np.__version__) < "2.0.0" + and dtype == np.float32 + ): out = out.astype(np.complex64) return out +# @testing.with_requires("numpy>=2.0") @testing.parameterize( {"n": 1, "d": 1}, {"n": 10, "d": 0.5}, {"n": 100, "d": 2}, ) class TestFftfreq: + + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, + contiguous_check=False, type_check=has_support_aspect64(), ) - def test_fftfreq(self, xp): - out = xp.fft.fftfreq(self.n, self.d) - - return out + def test_fftfreq(self, xp, dtype): + return xp.fft.fftfreq(self.n, self.d) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, + contiguous_check=False, type_check=has_support_aspect64(), ) - def test_rfftfreq(self, xp): - out = xp.fft.rfftfreq(self.n, self.d) - - return out + def test_rfftfreq(self, xp, dtype): + return xp.fft.rfftfreq(self.n, self.d) +# @testing.with_requires("numpy>=2.0") @testing.parameterize( {"shape": (5,), "axes": None}, {"shape": (5,), "axes": 0}, @@ -603,26 +1440,54 @@ def test_rfftfreq(self, xp): {"shape": (10, 10), "axes": (0, 1)}, ) class TestFftshift: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, + contiguous_check=False, type_check=has_support_aspect64(), ) def test_fftshift(self, xp, dtype): x = testing.shaped_random(self.shape, xp, dtype) - out = xp.fft.fftshift(x, self.axes) - - return out + return xp.fft.fftshift(x, self.axes) @testing.for_all_dtypes() @testing.numpy_cupy_allclose( - rtol=1e-4, + rtol=1e-3, atol=1e-7, + contiguous_check=False, type_check=has_support_aspect64(), ) def test_ifftshift(self, xp, dtype): x = testing.shaped_random(self.shape, xp, dtype) - out = xp.fft.ifftshift(x, self.axes) + return xp.fft.ifftshift(x, self.axes) - return out + +@pytest.mark.skip("no threading support") +class TestThreading: + + def test_threading1(self): + import threading + + from cupy.cuda.cufft import get_current_plan + + def 
thread_get_curr_plan(): + cupy.cuda.Device().use() + return get_current_plan() + + new_thread = threading.Thread(target=thread_get_curr_plan) + new_thread.start() + + def test_threading2(self): + import threading + + a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10) + + def thread_do_fft(): + cupy.cuda.Device().use() + b = cupy.fft.fftn(a) + return b + + new_thread = threading.Thread(target=thread_do_fft) + new_thread.start() diff --git a/dpnp/tests/third_party/cupy/indexing_tests/test_generate.py b/dpnp/tests/third_party/cupy/indexing_tests/test_generate.py index 61d7bf83ac2e..52683ee6a9bf 100644 --- a/dpnp/tests/third_party/cupy/indexing_tests/test_generate.py +++ b/dpnp/tests/third_party/cupy/indexing_tests/test_generate.py @@ -8,6 +8,7 @@ class TestIndices(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_indices_list0(self, xp, dtype): @@ -31,6 +32,7 @@ def test_indices_list3(self): class TestIX_(unittest.TestCase): + @pytest.mark.skip("List input is not supported") @testing.numpy_cupy_array_equal() def test_ix_list(self, xp): @@ -52,6 +54,7 @@ def test_ix_bool_ndarray(self, xp): @pytest.mark.skip("r_[] is not supported yet") class TestR_(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_r_1(self, xp, dtype): @@ -113,6 +116,7 @@ def test_r_scalars(self, xp): @pytest.mark.skip("c_[] is not supported yet") class TestC_(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_c_1(self, xp, dtype): @@ -138,6 +142,7 @@ def test_c_3(self, dtype): @pytest.mark.skip("no AxisConcatenator is provided") class TestAxisConcatenator(unittest.TestCase): + def test_AxisConcatenator_init1(self): with self.assertRaises(TypeError): generate.AxisConcatenator.__init__() @@ -148,6 +153,7 @@ def test_len(self): class TestUnravelIndex(unittest.TestCase): + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes() 
@testing.numpy_cupy_array_equal(type_check=False) @@ -182,6 +188,7 @@ def test_invalid_dtype(self, order, dtype): class TestRavelMultiIndex(unittest.TestCase): + @testing.for_orders(["C", "F", None]) @testing.for_int_dtypes() @testing.numpy_cupy_array_equal() @@ -288,6 +295,7 @@ def test_invalid_mode(self, dtype): class TestMaskIndices: + @testing.numpy_cupy_array_equal() def test_mask_indices(self, xp): # arr is a square matrix with 50% density @@ -305,6 +313,7 @@ def test_empty(self, xp): class TestTrilIndices: + @testing.numpy_cupy_array_equal() def test_tril_indices_1(self, xp): return xp.tril_indices(n=29, k=0) @@ -330,6 +339,7 @@ def test_tril_indices(self, dtype): class TestTrilIndicesForm: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_tril_indices_from_1(self, xp, dtype): @@ -356,6 +366,7 @@ def test_tril_indices_from_4(self, dtype): class TestTriuIndices: + @testing.numpy_cupy_array_equal() def test_triu_indices_1(self, xp): return xp.triu_indices(n=10, k=0) @@ -381,6 +392,7 @@ def test_triu_indices_4(self, dtype): class TestTriuIndicesFrom: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_triu_indices_from_1(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/indexing_tests/test_indexing.py b/dpnp/tests/third_party/cupy/indexing_tests/test_indexing.py index 4e858cb0acda..a977b44bbdbd 100644 --- a/dpnp/tests/third_party/cupy/indexing_tests/test_indexing.py +++ b/dpnp/tests/third_party/cupy/indexing_tests/test_indexing.py @@ -9,6 +9,7 @@ class TestIndexing(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_take_by_scalar(self, xp): a = testing.shaped_arange((2, 4, 3), xp) @@ -199,6 +200,7 @@ def test_extract_empty_1dim(self, xp): class TestChoose(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_choose(self, xp, dtype): @@ -265,6 +267,8 @@ def test_choose_broadcast_fail(self, dtype): class TestSelect(unittest.TestCase): + + 
@testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_select(self, xp, dtype): @@ -273,6 +277,7 @@ def test_select(self, xp, dtype): choicelist = [a, a**2] return xp.select(condlist, choicelist) + @testing.with_requires("numpy>=2.0") @testing.for_complex_dtypes() @testing.numpy_cupy_array_almost_equal() def test_select_complex(self, xp, dtype): @@ -281,6 +286,7 @@ def test_select_complex(self, xp, dtype): choicelist = [a, a**2] return xp.select(condlist, choicelist) + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_select_default(self, xp, dtype): @@ -290,6 +296,7 @@ def test_select_default(self, xp, dtype): default = 3 return xp.select(condlist, choicelist, default) + @testing.with_requires("numpy>=2.0") @testing.for_complex_dtypes() @testing.numpy_cupy_array_almost_equal() def test_select_default_complex(self, xp, dtype): @@ -299,6 +306,7 @@ def test_select_default_complex(self, xp, dtype): default = 3 return xp.select(condlist, choicelist, default) + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_select_odd_shaped_broadcastable(self, xp, dtype): @@ -308,6 +316,7 @@ def test_select_odd_shaped_broadcastable(self, xp, dtype): choicelist = [a, b] return xp.select(condlist, choicelist) + @testing.with_requires("numpy>=2.0") @testing.for_complex_dtypes() @testing.numpy_cupy_allclose(rtol=1e-5) def test_select_odd_shaped_broadcastable_complex(self, xp, dtype): @@ -326,6 +335,7 @@ def test_select_1D_choicelist(self, xp, dtype): choicelist = [a, b] return xp.select(condlist, choicelist) + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_select_choicelist_condlist_broadcast(self, xp, dtype): diff --git 
a/dpnp/tests/third_party/cupy/indexing_tests/test_insert.py b/dpnp/tests/third_party/cupy/indexing_tests/test_insert.py index a60957108c04..f3edc5cadc80 100644 --- a/dpnp/tests/third_party/cupy/indexing_tests/test_insert.py +++ b/dpnp/tests/third_party/cupy/indexing_tests/test_insert.py @@ -16,6 +16,7 @@ ) ) class TestPlace(unittest.TestCase): + # NumPy 1.9 don't wraps values. # https://github.com/numpy/numpy/pull/5821 @testing.for_all_dtypes() @@ -39,6 +40,7 @@ def test_place(self, xp, dtype): ) ) class TestPlaceRaises(unittest.TestCase): + # NumPy 1.9 performs illegal memory access. # https://github.com/numpy/numpy/pull/5821 @testing.with_requires("numpy>=1.10") @@ -74,6 +76,7 @@ def test_place_shape_unmatch_error(self, dtype): ) ) class TestPut(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_put(self, xp, dtype): @@ -101,6 +104,7 @@ def test_put(self, xp, dtype): ) ) class TestPutScalars(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_put_index_scalar(self, xp): dtype = cupy.float32 @@ -130,6 +134,7 @@ def test_put_values_scalar(self, xp): ) ) class TestPutRaises(unittest.TestCase): + @pytest.mark.skip("'raise' mode is not supported") @testing.for_all_dtypes() def test_put_inds_underflow_error(self, dtype): @@ -165,6 +170,7 @@ def test_put_mode_error(self, dtype): *testing.product({"shape": [(0,), (1,), (2, 3), (2, 3, 4)]}) ) class TestPutmaskSameShape(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_putmask(self, xp, dtype): @@ -185,6 +191,7 @@ def test_putmask(self, xp, dtype): ) ) class TestPutmaskDifferentShapes(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_putmask(self, xp, dtype): @@ -200,6 +207,7 @@ def test_putmask(self, xp, dtype): @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestPutmask(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_putmask_scalar_values(self, xp): shape = (2, 
3) @@ -211,7 +219,7 @@ def test_putmask_non_equal_shape_raises(self): for xp in (numpy, cupy): a = xp.array([1, 2, 3]) mask = xp.array([True, False]) - with pytest.raises(ValueError): + with pytest.raises((ValueError, IndexError)): xp.putmask(a, mask, a**2) @testing.numpy_cupy_array_equal() @@ -223,6 +231,8 @@ def test_putmask_int_mask_scalar_values(self, xp): class TestPutmaskDifferentDtypes(unittest.TestCase): + + @pytest.mark.skip("putmask() is not fully supported") @testing.for_all_dtypes_combination(names=["a_dtype", "val_dtype"]) def test_putmask_differnt_dtypes_raises(self, a_dtype, val_dtype): shape = (2, 3) @@ -255,6 +265,7 @@ def test_putmask_differnt_dtypes_mask(self, xp, dtype): ) ) class TestFillDiagonal(unittest.TestCase): + def _compute_val(self, xp): if type(self.val) is int: return self.val @@ -299,6 +310,7 @@ def test_1darray(self, dtype): ) ) class TestDiagIndices(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_diag_indices(self, xp): return xp.diag_indices(self.n, self.ndim) @@ -313,6 +325,7 @@ def test_diag_indices(self, xp): ) ) class TestDiagIndicesInvalidValues(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_diag_indices(self, xp): return xp.diag_indices(self.n, self.ndim) @@ -326,6 +339,7 @@ def test_diag_indices(self, xp): ) ) class TestDiagIndicesFrom(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_diag_indices_from(self, xp): arr = testing.shaped_arange(self.shape, xp) @@ -340,6 +354,7 @@ def test_diag_indices_from(self, xp): ) ) class TestDiagIndicesFromRaises(unittest.TestCase): + def test_non_equal_dims(self): for xp in (numpy, cupy): arr = testing.shaped_arange(self.shape, xp) diff --git a/dpnp/tests/third_party/cupy/indexing_tests/test_iterate.py b/dpnp/tests/third_party/cupy/indexing_tests/test_iterate.py index 0c18b7759825..f68af146dd64 100644 --- a/dpnp/tests/third_party/cupy/indexing_tests/test_iterate.py +++ b/dpnp/tests/third_party/cupy/indexing_tests/test_iterate.py @@ -14,6 
+14,8 @@ class TestFlatiter(unittest.TestCase): + + @pytest.mark.skip("no base attribute") def test_base(self): for xp in (numpy, cupy): a = xp.zeros((2, 3, 4)) @@ -31,12 +33,14 @@ def test_next(self): for ai, ei in zip(a.flat, e): assert ai == ei + @pytest.mark.skip("no len() method") def test_len(self): for xp in (numpy, cupy): a = xp.zeros((2, 3, 4)) assert len(a.flat) == 24 assert len(a[::2].flat) == 12 + @pytest.mark.skip("no copy attribute") @testing.numpy_cupy_array_equal() def test_copy(self, xp): a = testing.shaped_arange((2, 3, 4), xp) @@ -44,6 +48,7 @@ def test_copy(self, xp): assert a is not o return a.flat.copy() + @pytest.mark.skip("no copy attribute") @testing.numpy_cupy_array_equal() def test_copy_next(self, xp): a = testing.shaped_arange((2, 3, 4), xp) @@ -53,19 +58,20 @@ def test_copy_next(self, xp): @testing.parameterize( - {"shape": (2, 3, 4), "index": Ellipsis}, + # {"shape": (2, 3, 4), "index": Ellipsis}, {"shape": (2, 3, 4), "index": 0}, {"shape": (2, 3, 4), "index": 10}, - {"shape": (2, 3, 4), "index": slice(None)}, - {"shape": (2, 3, 4), "index": slice(None, 10)}, - {"shape": (2, 3, 4), "index": slice(None, None, 2)}, - {"shape": (2, 3, 4), "index": slice(None, None, -1)}, - {"shape": (2, 3, 4), "index": slice(10, None, -1)}, - {"shape": (2, 3, 4), "index": slice(10, None, -2)}, - {"shape": (), "index": slice(None)}, - {"shape": (10,), "index": slice(None)}, + # {"shape": (2, 3, 4), "index": slice(None)}, + # {"shape": (2, 3, 4), "index": slice(None, 10)}, + # {"shape": (2, 3, 4), "index": slice(None, None, 2)}, + # {"shape": (2, 3, 4), "index": slice(None, None, -1)}, + # {"shape": (2, 3, 4), "index": slice(10, None, -1)}, + # {"shape": (2, 3, 4), "index": slice(10, None, -2)}, + # {"shape": (), "index": slice(None)}, + # {"shape": (10,), "index": slice(None)}, ) class TestFlatiterSubscript(unittest.TestCase): + @testing.for_CF_orders() @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -121,11 +127,12 @@ def 
test_setitem_ndarray_different_types(self, xp, a_dtype, v_dtype, order): {"shape": (2, 3, 4), "index": None}, {"shape": (2, 3, 4), "index": (0,)}, {"shape": (2, 3, 4), "index": True}, - # printing behaviour of dpnp_array ('index') makes imposibble to skip this test - # {'shape': (2, 3, 4), 'index': cupy.array([0])}, + {"shape": (2, 3, 4), "index": cupy.array([0])}, {"shape": (2, 3, 4), "index": [0]}, ) +@pytest.mark.skip("no exception raised") class TestFlatiterSubscriptIndexError(unittest.TestCase): + @testing.for_all_dtypes() def test_getitem(self, dtype): a = testing.shaped_arange(self.shape, cupy, dtype) diff --git a/dpnp/tests/third_party/cupy/io_tests/test_formatting.py b/dpnp/tests/third_party/cupy/io_tests/test_formatting.py new file mode 100644 index 000000000000..36f69883b984 --- /dev/null +++ b/dpnp/tests/third_party/cupy/io_tests/test_formatting.py @@ -0,0 +1,47 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("text formatting is not supported yet", allow_module_level=True) + + +class TestFormatting(unittest.TestCase): + + def test_array_repr(self): + a = testing.shaped_arange((2, 3, 4), cupy) + b = testing.shaped_arange((2, 3, 4), numpy) + assert cupy.array_repr(a) == numpy.array_repr(b) + + def test_array_str(self): + a = testing.shaped_arange((2, 3, 4), cupy) + b = testing.shaped_arange((2, 3, 4), numpy) + assert cupy.array_str(a) == numpy.array_str(b) + + def test_array2string(self): + a = testing.shaped_arange((2, 3, 4), cupy) + b = testing.shaped_arange((2, 3, 4), numpy) + assert cupy.array2string(a) == numpy.array2string(b) + + def test_format_float_positional_python_scalar(self): + x = 1.0 + assert cupy.format_float_positional(x) == numpy.format_float_positional( + x + ) + + def test_format_float_positional(self): + a = testing.shaped_arange((), cupy) + b = testing.shaped_arange((), numpy) + assert cupy.format_float_positional(a) == 
numpy.format_float_positional( + b + ) + + def test_format_float_scientific(self): + a = testing.shaped_arange((), cupy) + b = testing.shaped_arange((), numpy) + assert cupy.format_float_scientific(a) == numpy.format_float_scientific( + b + ) diff --git a/dpnp/tests/third_party/cupy/io_tests/test_npz.py b/dpnp/tests/third_party/cupy/io_tests/test_npz.py new file mode 100644 index 000000000000..7d42fd24d947 --- /dev/null +++ b/dpnp/tests/third_party/cupy/io_tests/test_npz.py @@ -0,0 +1,104 @@ +import io +import pickle +import unittest + +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip("text functions are not supported yet", allow_module_level=True) + + +class TestNpz(unittest.TestCase): + + @testing.for_all_dtypes() + def test_save_load(self, dtype): + a = testing.shaped_arange((2, 3, 4), dtype=dtype) + sio = io.BytesIO() + cupy.save(sio, a) + s = sio.getvalue() + sio.close() + + sio = io.BytesIO(s) + b = cupy.load(sio) + sio.close() + + testing.assert_array_equal(a, b) + + def test_save_pickle(self): + data = object() + + sio = io.BytesIO() + with self.assertRaises(ValueError): + cupy.save(sio, data, allow_pickle=False) + sio.close() + + sio = io.BytesIO() + cupy.save(sio, data, allow_pickle=True) + sio.close() + + def test_load_pickle(self): + a = testing.shaped_arange((2, 3, 4), dtype=cupy.float32) + + sio = io.BytesIO() + a.dump(sio) + s = sio.getvalue() + sio.close() + + sio = io.BytesIO(s) + b = cupy.load(sio, allow_pickle=True) + testing.assert_array_equal(a, b) + sio.close() + + sio = io.BytesIO(s) + with self.assertRaises(ValueError): + cupy.load(sio, allow_pickle=False) + sio.close() + + @testing.for_all_dtypes() + def check_savez(self, savez, dtype): + a1 = testing.shaped_arange((2, 3, 4), dtype=dtype) + a2 = testing.shaped_arange((3, 4, 5), dtype=dtype) + + sio = io.BytesIO() + savez(sio, a1, a2) + s = sio.getvalue() + sio.close() + + sio = io.BytesIO(s) + with cupy.load(sio) as d: + b1 = d["arr_0"] + 
b2 = d["arr_1"] + sio.close() + + testing.assert_array_equal(a1, b1) + testing.assert_array_equal(a2, b2) + + def test_savez(self): + self.check_savez(cupy.savez) + + def test_savez_compressed(self): + self.check_savez(cupy.savez_compressed) + + @testing.for_all_dtypes() + def test_pickle(self, dtype): + a = testing.shaped_arange((2, 3, 4), dtype=dtype) + s = pickle.dumps(a) + b = pickle.loads(s) + testing.assert_array_equal(a, b) + + @testing.for_all_dtypes() + def test_dump(self, dtype): + a = testing.shaped_arange((2, 3, 4), dtype=dtype) + + sio = io.BytesIO() + a.dump(sio) + s = sio.getvalue() + sio.close() + + sio = io.BytesIO(s) + b = cupy.load(sio, allow_pickle=True) + sio.close() + + testing.assert_array_equal(a, b) diff --git a/dpnp/tests/third_party/cupy/io_tests/test_text.py b/dpnp/tests/third_party/cupy/io_tests/test_text.py new file mode 100644 index 000000000000..c06dc3aef1d1 --- /dev/null +++ b/dpnp/tests/third_party/cupy/io_tests/test_text.py @@ -0,0 +1,28 @@ +import filecmp +import os +import tempfile +import unittest + +import numpy +import pytest + +import dpnp as cupy + +pytest.skip("text functions are not supported yet", allow_module_level=True) + + +class TestText(unittest.TestCase): + + def test_savetxt(self): + tmp_cupy = tempfile.NamedTemporaryFile(delete=False) + tmp_numpy = tempfile.NamedTemporaryFile(delete=False) + try: + tmp_cupy.close() + tmp_numpy.close() + array = [[1, 2, 3], [2, 3, 4]] + cupy.savetxt(tmp_cupy.name, cupy.array(array)) + numpy.savetxt(tmp_numpy.name, numpy.array(array)) + assert filecmp.cmp(tmp_cupy.name, tmp_numpy.name) + finally: + os.remove(tmp_cupy.name) + os.remove(tmp_numpy.name) diff --git a/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py b/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py new file mode 100644 index 000000000000..3c7103827cd8 --- /dev/null +++ b/dpnp/tests/third_party/cupy/lib_tests/test_polynomial.py @@ -0,0 +1,982 @@ +import numpy +import pytest + +import dpnp as cupy + +# 
from cupy.cuda import driver +# from cupy.cuda import runtime +# import cupyx +from dpnp.tests.third_party.cupy import testing + +# from cupy.exceptions import RankWarning + +pytest.skip("poly functions are not supported yet", allow_module_level=True) + + +@testing.parameterize( + {"variable": None}, + {"variable": "y"}, +) +class TestPoly1dInit: + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + def test_poly1d_numpy_array(self, xp, dtype): + a = numpy.arange(5, dtype=dtype) + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_cupy_array(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.numpy_cupy_array_equal() + def test_poly1d_list(self, xp): + with cupyx.allow_synchronize(False): + out = xp.poly1d([1, 2, 3, 4], variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_numpy_poly1d(self, xp, dtype): + array = testing.shaped_arange((5,), numpy, dtype) + a = numpy.poly1d(array) + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_numpy_poly1d_variable(self, xp, dtype): + array = testing.shaped_arange((5,), numpy, dtype) + a = numpy.poly1d(array, variable="z") + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "z") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_cupy_poly1d(self, xp, dtype): 
+ array = testing.shaped_arange((5,), xp, dtype) + a = xp.poly1d(array) + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_cupy_poly1d_variable(self, xp, dtype): + array = testing.shaped_arange((5,), xp, dtype) + a = xp.poly1d(array, variable="z") + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "z") + return out + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_zero_dim(self, xp, dtype): + a = testing.shaped_arange((), xp, dtype) + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_zero_size(self, xp, dtype): + a = testing.shaped_arange((0,), xp, dtype) + with cupyx.allow_synchronize(False): + out = xp.poly1d(a, variable=self.variable) + assert out.variable == (self.variable or "x") + return out + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5) + def test_poly1d_roots(self, xp, dtype): + a = testing.shaped_arange((4,), xp, dtype) + out = xp.poly1d(a, True, variable=self.variable) + assert out.variable == (self.variable or "x") + return out.coeffs + + +class TestPoly1d: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_leading_zeros(self, xp, dtype): + a = xp.array([0, 0, 1, 2, 3], dtype) + return xp.poly1d(a) + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + def test_poly1d_neg(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + return -xp.poly1d(a) + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_order(self, xp, dtype): + a = testing.shaped_arange((10,), xp, dtype) + 
return xp.poly1d(a).order + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_order_leading_zeros(self, xp, dtype): + a = xp.array([0, 0, 1, 2, 3, 0], dtype) + return xp.poly1d(a).order + + @pytest.mark.skipif( + runtime.is_hip and driver.get_build_version() < 402, + reason="syevj not available", + ) + @testing.for_signed_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_poly1d_roots(self, xp, dtype): + a = xp.array([-3, -2.5, 3], dtype) + out = xp.poly1d(a).roots + # The current `cupy.roots` doesn't guarantee the order of results. + return xp.sort(out) + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_getitem1(self, xp, dtype): + a = testing.shaped_arange((10,), xp, dtype) + with cupyx.allow_synchronize(False): + return xp.poly1d(a)[-1] + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_getitem2(self, xp, dtype): + a = testing.shaped_arange((10,), xp, dtype) + with cupyx.allow_synchronize(False): + return xp.poly1d(a)[5] + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_getitem3(self, xp, dtype): + a = testing.shaped_arange((10,), xp, dtype) + with cupyx.allow_synchronize(False): + return xp.poly1d(a)[100] + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_getitem4(self, xp, dtype): + a = xp.array([0, 0, 1, 2, 3, 0], dtype) + with cupyx.allow_synchronize(False): + return xp.poly1d(a)[2] + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_setitem(self, xp, dtype): + a = testing.shaped_arange((10,), xp, dtype) + b = xp.poly1d(a) + with cupyx.allow_synchronize(False): + b[100] = 20 + return b + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_poly1d_setitem_leading_zeros(self, xp, dtype): + a = xp.array([0, 0, 0, 2, 3, 0], dtype) + b = xp.poly1d(a) + with cupyx.allow_synchronize(False): + b[1] = 10 + return b + + @testing.for_all_dtypes() + def 
test_poly1d_setitem_neg(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((10,), xp, dtype) + b = xp.poly1d(a) + with pytest.raises(ValueError): + b[-1] = 20 + + @testing.for_all_dtypes() + def test_poly1d_get1(self, dtype): + a1 = testing.shaped_arange((10,), cupy, dtype) + a2 = testing.shaped_arange((10,), numpy, dtype) + b1 = cupy.poly1d(a1, variable="z").get() + b2 = numpy.poly1d(a2, variable="z") + assert b1 == b2 + + @testing.for_all_dtypes() + def test_poly1d_get2(self, dtype): + a1 = testing.shaped_arange((), cupy, dtype) + a2 = testing.shaped_arange((), numpy, dtype) + b1 = cupy.poly1d(a1).get() + b2 = numpy.poly1d(a2) + assert b1 == b2 + + @testing.for_all_dtypes(no_bool=True) + def test_poly1d_set(self, dtype): + arr1 = testing.shaped_arange((10,), cupy, dtype) + arr2 = numpy.ones(10, dtype=dtype) + a = cupy.poly1d(arr1) + b = numpy.poly1d(arr2, variable="z") + a.set(b) + assert a.variable == b.variable + testing.assert_array_equal(a.coeffs, b.coeffs) + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_repr(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + return repr(xp.poly1d(a)) + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_str(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + return str(xp.poly1d(a)) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-2, "default": 1e-6}) + def test_poly1d_call(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + b = xp.poly1d(a) + return b(a) + + +class TestPoly: + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose(rtol=1e-4) + def test_poly_1d(self, xp, dtype): + a = testing.shaped_arange((5,), xp, dtype) + return xp.poly(a) + + @testing.for_all_dtypes(no_bool=True, no_float16=True, no_complex=True) + @testing.numpy_cupy_allclose(rtol=1e-4) + def test_poly_2d_symmetric_real(self, xp, dtype): + a = xp.array([[6, 3, 1], [3, 0, 5], [1, 5, 6]], 
dtype) + return xp.poly(a) + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_poly_2d_hermitian_complex(self, xp, dtype): + a = xp.array([[2, -1j], [1j, 1]], dtype) + return xp.poly(a) + + @testing.for_all_dtypes(no_bool=True) + def test_poly_2d_square(self, dtype): + a = testing.shaped_arange((3, 3), cupy, dtype) + with pytest.raises(NotImplementedError): + cupy.poly(a) + + @testing.for_all_dtypes() + def test_poly_2d_general(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((2, 4), xp, dtype) + with pytest.raises(ValueError): + xp.poly(a) + + @testing.for_all_dtypes() + def test_poly_ndim(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((5, 4, 3), xp, dtype) + with pytest.raises(ValueError): + xp.poly(a) + + @testing.for_all_dtypes(no_bool=True) + def test_poly_zero_dim(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((), xp, dtype) + with pytest.raises(TypeError): + numpy.poly(a) + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly_empty(self, xp, dtype): + a = xp.zeros((0), dtype) + return xp.poly(a) + + +@testing.parameterize( + *testing.product( + { + "shape": [(), (0,), (5,)], + "exp": [0, 4, 5, numpy.int32(5), numpy.int64(5)], + } + ) +) +class TestPoly1dPow: + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-1) + def test_poly1d_pow_scalar(self, xp, dtype): + a = testing.shaped_arange(self.shape, xp, dtype) + return xp.poly1d(a) ** self.exp + + +@testing.parameterize( + *testing.product( + { + "shape": [(5,), (5, 2)], + "exp": [-10, 3.5, [1, 2, 3]], + } + ) +) +class TestPoly1dPowInvalidValue: + + @testing.for_all_dtypes() + def test_poly1d_pow(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange(self.shape, xp, dtype) + with pytest.raises(ValueError): + xp.poly1d(a) ** self.exp + + +@testing.parameterize( + *testing.product( + { + "exp": [3.0, numpy.float64(5)], + } 
+ ) +) +class TestPoly1dPowInvalidType: + + @testing.for_all_dtypes() + def test_poly1d_pow(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(TypeError): + xp.poly1d(a) ** self.exp + + +class Poly1dTestBase: + + def _get_input(self, xp, in_type, dtype, *, size=10): + if in_type in ("ndarray", "poly1d"): + array = testing.shaped_arange((size,), xp, dtype) + if array.dtype == numpy.bool_: + # Avoid leading zero-coefficients. + array = xp.logical_not(array) + if in_type == "poly1d": + array = xp.poly1d(array) + return array + if in_type == "python_scalar": + return dtype(5).item() + if in_type == "numpy_scalar": + return dtype(5) + assert False + + +@testing.parameterize( + *testing.product( + { + "func": [ + lambda x, y: x + y, + lambda x, y: x - y, + lambda x, y: x * y, + ], + "type_l": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + "type_r": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + } + ) +) +class TestPoly1dPolynomialArithmetic(Poly1dTestBase): + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, accept_error=TypeError) + def test_poly1d_arithmetic(self, xp, dtype): + if self.type_l == "numpy_scalar" and self.type_r == "poly1d": + pytest.skip("Avoid numpy bug.") + a1 = self._get_input(xp, self.type_l, dtype) + a2 = self._get_input(xp, self.type_r, dtype) + return self.func(a1, a2) + + +@testing.parameterize( + *testing.product( + { + "fname": ["add", "subtract", "multiply", "divide", "power"], + "type_l": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + "type_r": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + } + ) +) +class TestPoly1dMathArithmetic(Poly1dTestBase): + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_allclose(rtol=1e-5) + def test_poly1d_arithmetic(self, xp, dtype): + func = getattr(xp, self.fname) + a1 = self._get_input(xp, self.type_l, dtype) + a2 = self._get_input(xp, 
self.type_r, dtype) + return func(a1, a2) + + +@testing.parameterize( + *testing.product( + { + "fname": ["polyadd", "polysub", "polymul"], + "type_l": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + "type_r": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + } + ) +) +class TestPoly1dRoutines(Poly1dTestBase): + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-4, accept_error=TypeError) + def test_poly1d_routine(self, xp, dtype): + func = getattr(xp, self.fname) + a1 = self._get_input(xp, self.type_l, dtype) + a2 = self._get_input(xp, self.type_r, dtype) + return func(a1, a2) + + +class TestPoly1dEquality: + + def make_poly1d1(self, xp, dtype): + a1 = testing.shaped_arange((4,), xp, dtype) + a2 = xp.zeros((4,), dtype) + b1 = xp.poly1d(a1) + b2 = xp.poly1d(a2) + return b1, b2 + + def make_poly1d2(self, xp, dtype): + a1 = testing.shaped_arange((4,), xp, dtype) + a2 = testing.shaped_arange((4,), xp, dtype) + b1 = xp.poly1d(a1) + b2 = xp.poly1d(a2) + return b1, b2 + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_eq1(self, xp, dtype): + a, b = self.make_poly1d1(xp, dtype) + return a == b + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_eq2(self, xp, dtype): + a, b = self.make_poly1d2(xp, dtype) + return a == b + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_ne1(self, xp, dtype): + a, b = self.make_poly1d1(xp, dtype) + return a != b + + @testing.for_all_dtypes() + @testing.numpy_cupy_equal() + def test_poly1d_ne2(self, xp, dtype): + a, b = self.make_poly1d2(xp, dtype) + return a != b + + +@testing.parameterize( + *testing.product( + { + "fname": ["polyadd", "polysub", "polymul"], + "shape1": [(), (0,), (3,), (5,)], + "shape2": [(), (0,), (3,), (5,)], + } + ) +) +class TestPolyArithmeticShapeCombination: + + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes(no_bool=True) + 
@testing.numpy_cupy_allclose(rtol=1e-5) + def test_polyroutine(self, xp, dtype): + func = getattr(xp, self.fname) + a = testing.shaped_arange(self.shape1, xp, dtype) + b = testing.shaped_arange(self.shape2, xp, dtype) + return func(a, b) + + +@testing.parameterize( + *testing.product( + { + "fname": ["polyadd", "polysub", "polymul"], + } + ) +) +class TestPolyArithmeticDiffTypes: + + @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) + def test_polyroutine_diff_types_array(self, dtype1, dtype2): + def f(xp): + func = getattr(xp, self.fname) + a = testing.shaped_arange((10,), xp, dtype1) + b = testing.shaped_arange((5,), xp, dtype2) + return func(a, b) + + rtol = 1e-5 + if runtime.is_hip and self.fname == "polymul": + rtol = 1e-4 + try: + testing.assert_allclose(f(cupy), f(numpy), rtol=rtol) + except TypeError: + pass + + @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) + def test_polyroutine_diff_types_poly1d(self, dtype1, dtype2): + def f(xp): + func = getattr(xp, self.fname) + a = testing.shaped_arange((10,), xp, dtype1) + b = testing.shaped_arange((5,), xp, dtype2) + a = xp.poly1d(a, variable="z") + b = xp.poly1d(b, variable="y") + out = func(a, b) + assert out.variable == "x" + return out + + rtol = 1e-5 + if runtime.is_hip and self.fname == "polymul": + rtol = 1e-4 + try: + testing.assert_allclose(f(cupy), f(numpy), rtol=rtol) + except TypeError: + pass + + +@testing.parameterize( + *testing.product( + { + "shape1": [(3,)], + "shape2": [(3,), (3, 2)], + "deg": [0, 3, 3.0, 3.5, 10], + "rcond": [None, 0.5, 1e-15], + "weighted": [True, False], + } + ) +) +class TestPolyfitParametersCombinations: + + def _full_fit(self, xp, dtype): + x = testing.shaped_arange(self.shape1, xp, dtype) + y = testing.shaped_arange(self.shape2, xp, dtype) + w = x if self.weighted else None + return xp.polyfit(x, y, self.deg, self.rcond, True, w) + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose( + atol=1e-9, accept_error=TypeError, 
contiguous_check=False + ) + def test_polyfit_default(self, xp, dtype): + if runtime.is_hip and self.deg == 0: + pytest.xfail("ROCm/HIP may have a bug") + x = testing.shaped_arange(self.shape1, xp, dtype) + y = testing.shaped_arange(self.shape2, xp, dtype) + w = x if self.weighted else None + return xp.polyfit(x, y, self.deg, self.rcond, w=w) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_full(self, dtype): + if runtime.is_hip and self.deg == 0: + pytest.xfail("ROCm/HIP may have a bug") + + cp_c, cp_resids, cp_rank, cp_s, cp_rcond = self._full_fit(cupy, dtype) + np_c, np_resids, np_rank, np_s, np_rcond = self._full_fit(numpy, dtype) + + testing.assert_allclose(cp_c, np_c, atol=1e-9) + testing.assert_allclose(cp_resids, np_resids, atol=1e-9) + testing.assert_allclose(cp_s, np_s, atol=1e-9) + assert cp_rank == np_rank + if self.rcond is not None: + assert cp_rcond == np_rcond + + +@testing.parameterize( + *testing.product( + { + "shape": [(3,), (3, 2)], + "deg": [0, 1], + "rcond": [None, 1e-15], + "weighted": [True, False], + "cov": ["unscaled", True], + } + ) +) +class TestPolyfitCovMode: + + def _cov_fit(self, xp, dtype): + x = xp.array([0.008, 0.01, 0.015], dtype) + y = testing.shaped_arange(self.shape, xp, dtype) + w = x if self.weighted else None + return xp.polyfit(x, y, self.deg, self.rcond, w=w, cov=self.cov) + + @testing.for_float_dtypes(no_float16=True) + def test_polyfit_cov(self, dtype): + if runtime.is_hip and self.deg == 0: + pytest.xfail("ROCm/HIP may have a bug") + cp_c, cp_cov = self._cov_fit(cupy, dtype) + np_c, np_cov = self._cov_fit(numpy, dtype) + testing.assert_allclose(cp_c, np_c, rtol=1e-5) + testing.assert_allclose(cp_cov, np_cov, rtol=1e-3) + + +class TestPolyfit: + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_poor_fit(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.warns(RankWarning): + xp.polyfit(x, y, 6) + + 
+@testing.parameterize( + *testing.product( + { + "shape": [(), (0,), (5, 3, 3)], + } + ) +) +class TestPolyfitInvalidShapes: + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_x_invalid_shape(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange(self.shape, xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(TypeError): + xp.polyfit(x, y, 5) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_y_invalid_shape(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + y = testing.shaped_arange(self.shape, xp, dtype) + with pytest.raises(TypeError): + xp.polyfit(x, y, 5) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_w_invalid_shape(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + w = testing.shaped_arange(self.shape, xp, dtype) + with pytest.raises(TypeError): + xp.polyfit(x, x, 5, w=w) + + +class TestPolyfitInvalid: + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_neg_degree(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(ValueError): + xp.polyfit(x, y, -4) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_complex_degree(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(TypeError): + xp.polyfit(x, y, 5j) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_xy_mismatched_length(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((10,), xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(TypeError): + xp.polyfit(x, y, 5) + + @testing.for_all_dtypes(no_float16=True) + def test_polyfit_yw_mismatched_length(self, dtype): + for xp in (numpy, cupy): + y = testing.shaped_arange((10,), xp, dtype) + w = testing.shaped_arange((5,), xp, dtype) + 
with pytest.raises(TypeError): + xp.polyfit(y, y, 5, w=w) + + @testing.for_all_dtypes(no_float16=True, no_bool=True) + def test_polyfit_cov_invalid(self, dtype): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, dtype) + y = testing.shaped_arange((5,), xp, dtype) + with pytest.raises(ValueError): + xp.polyfit(x, y, 5, cov=True) + + def test_polyfit_float16(self): + for xp in (numpy, cupy): + x = testing.shaped_arange((5,), xp, numpy.float16) + with pytest.raises(TypeError): + xp.polyfit(x, x, 4) + + +class TestPolyfitDiffTypes: + + @testing.for_all_dtypes_combination( + names=["dtype1", "dtype2"], no_bool=True, full=True + ) + @testing.numpy_cupy_allclose(rtol=1e-1, atol=1e-1, accept_error=TypeError) + def test_polyfit_unweighted_diff_types(self, xp, dtype1, dtype2): + x = testing.shaped_arange((5,), xp, dtype1) + y = testing.shaped_arange((5,), xp, dtype2) + return xp.polyfit(x, y, 5) + + @testing.for_all_dtypes_combination( + names=["dtype1", "dtype2", "dtype3"], no_bool=True, full=True + ) + @testing.numpy_cupy_allclose(atol=1e-0, accept_error=TypeError) + def test_polyfit_weighted_diff_types(self, xp, dtype1, dtype2, dtype3): + x = testing.shaped_arange((5,), xp, dtype1) + y = testing.shaped_arange((5,), xp, dtype2) + w = testing.shaped_arange((5,), xp, dtype3) + return xp.polyfit(x, y, 5, w=w) + + +@testing.parameterize( + *testing.product( + { + "type_l": ["poly1d", "ndarray"], + "type_r": ["poly1d", "ndarray", "numpy_scalar", "python_scalar"], + } + ) +) +class TestPolyval(Poly1dTestBase): + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-2, "default": 1e-3}) + def test_polyval(self, xp, dtype): + a1 = self._get_input(xp, self.type_l, dtype, size=5) + a2 = self._get_input(xp, self.type_r, dtype, size=5) + + if self.type_r == "python_scalar": + pytest.skip("XXX: np2.0: numpy always returns f64") + + return xp.polyval(a1, a2) + + +@testing.parameterize( + *testing.product( + { + "type_l": ["numpy_scalar", 
"python_scalar"], + "type_r": ["poly1d", "ndarray", "python_scalar", "numpy_scalar"], + } + ) +) +class TestPolyvalInvalidTypes(Poly1dTestBase): + + @testing.for_all_dtypes() + def test_polyval(self, dtype): + for xp in (numpy, cupy): + a1 = self._get_input(xp, self.type_l, dtype) + a2 = self._get_input(xp, self.type_r, dtype) + with pytest.raises(TypeError): + xp.polyval(a1, a2) + + +@testing.parameterize( + *testing.product( + {"shape1": [(0,), (3,), (5,)], "shape2": [(), (0,), (3,), (5,)]} + ) +) +class TestPolyvalShapeCombination: + + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-2, "default": 1e-6}) + def test_polyval(self, xp, dtype): + a = testing.shaped_arange(self.shape1, xp, dtype) + b = testing.shaped_arange(self.shape2, xp, dtype) + return xp.polyval(a, b) + + +@testing.parameterize(*testing.product({"shape": [(), (0,), (3,), (5,)]})) +class TestPolyvalInvalidShapeCombination: + + @testing.for_all_dtypes() + def test_polyval(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((), xp, dtype) + b = testing.shaped_arange(self.shape, xp, dtype) + with pytest.raises(TypeError): + xp.polyval(a, b) + + +class TestPolyvalDtypesCombination: + + @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"], full=True) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-2, "default": 1e-6}) + def test_polyval_diff_types_array_array(self, xp, dtype1, dtype2): + a = testing.shaped_arange((10,), xp, dtype1) + b = testing.shaped_arange((3,), xp, dtype2) + return xp.polyval(a, b) + + @testing.with_requires("numpy>=1.25") + @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"], full=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_polyval_diff_types_array_scalar(self, xp, dtype1, dtype2): + a = testing.shaped_arange((10,), xp, dtype1) + b = dtype2(3) + return xp.polyval(a, b) + + +class TestPolyvalMultiDimensional: + + @testing.for_all_dtypes() + def test_polyval_ndim_values(self, dtype): + a = 
testing.shaped_arange((10,), cupy, dtype) + b = testing.shaped_arange((2, 4), cupy, dtype) + return cupy.polyval(a, b) + + @testing.for_all_dtypes() + def test_polyval_poly1d_values(self, dtype): + a = testing.shaped_arange((5,), cupy, dtype) + b = testing.shaped_arange((3,), cupy, dtype) + b = cupy.poly1d(b) + return cupy.polyval(a, b) + + +@testing.parameterize( + *testing.product( + { + "fname": ["polyadd", "polysub", "polymul", "polyval"], + } + ) +) +class TestPolyRoutinesNdim: + + @testing.for_all_dtypes() + def test_polyroutine_ndim(self, dtype): + for xp in (numpy, cupy): + func = getattr(xp, self.fname) + a = testing.shaped_arange((2, 3, 4), xp, dtype) + b = testing.shaped_arange((10, 5), xp, dtype) + with pytest.raises(ValueError): + func(a, b) + + +@testing.parameterize( + *testing.product( + { + "input": [[2, -1, -2], [-4, 10, 4]], + } + ) +) +@pytest.mark.skipif( + runtime.is_hip and driver.get_build_version() < 402, + reason="syevj not available", +) +class TestRootsReal: + + @testing.for_signed_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_roots_array(self, xp, dtype): + a = xp.array(self.input, dtype) + out = xp.roots(a) + return xp.sort(out) + + @testing.for_signed_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_roots_poly1d(self, xp, dtype): + a = xp.array(self.input, dtype) + out = xp.roots(xp.poly1d(a)) + return xp.sort(out) + + +@testing.parameterize( + *testing.product( + { + "input": [[3j, 1.5j, -3j], [3 + 2j, 5], [3j, 0], [0, 3j]], + } + ) +) +class TestRootsComplex: + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_roots_array(self, xp, dtype): + if runtime.is_hip and self.input == [3j, 1.5j, -3j]: + pytest.xfail("rocBLAS not implemented") + a = xp.array(self.input, dtype) + out = xp.roots(a) + return xp.sort(out) + + @testing.for_complex_dtypes() + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_roots_poly1d(self, xp, dtype): + if runtime.is_hip and self.input == [3j, 
1.5j, -3j]: + pytest.xfail("rocBLAS not implemented") + a = xp.array(self.input, dtype) + out = xp.roots(xp.poly1d(a)) + return xp.sort(out) + + +@testing.parameterize( + *testing.product( + { + "input": [[5, 10], [5, 0], [0, 5], [0, 0], [5]], + } + ) +) +class TestRootsSpecialCases: + + @testing.for_all_dtypes(no_float16=True, no_bool=True) + @testing.numpy_cupy_array_equal() + def test_roots_array(self, xp, dtype): + a = xp.array(self.input, dtype) + return xp.roots(a) + + @testing.for_all_dtypes(no_float16=True, no_bool=True) + @testing.numpy_cupy_array_equal() + def test_roots_poly1d(self, xp, dtype): + a = xp.array(self.input, dtype) + return xp.roots(xp.poly1d(a)) + + +class TestRoots: + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + def test_roots_zero_sized(self, xp, dtype): + a = xp.zeros((0,), dtype) + return xp.roots(a) + + @testing.with_requires("numpy>1.17") + @testing.for_all_dtypes(no_bool=True) + def test_roots_zero_dim(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_random((), xp, dtype) + with pytest.raises(TypeError): + xp.roots(a) + + @testing.for_all_dtypes(no_bool=True) + def test_roots_ndim(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_arange((3, 1), xp, dtype) + with pytest.raises(ValueError): + xp.roots(a) + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + def test_roots_zeros(self, xp, dtype): + a = xp.zeros((3,), dtype) + return xp.roots(a) + + @testing.for_all_dtypes(no_bool=True) + def test_roots_zeros_ndim(self, dtype): + for xp in (numpy, cupy): + a = xp.zeros((2, 1), dtype) + with pytest.raises(ValueError): + xp.roots(a) + + def test_roots_bool_symmetric(self): + a = cupy.array([5, -1, -5], bool) + with pytest.raises(NotImplementedError): + cupy.roots(a) diff --git a/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py b/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py index c4063a569289..76da1cdb1de0 100644 --- 
a/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py +++ b/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py @@ -11,9 +11,10 @@ @testing.parameterize(*(testing.product({"axis": [0, 1, -1]}))) class TestApplyAlongAxis(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_simple(self, xp): - a = xp.ones((20, 10), dtype="f") + a = xp.ones((20, 10), dtype=cupy.default_float_type()) return xp.apply_along_axis(len, self.axis, a) @testing.for_all_dtypes(no_bool=True) @@ -109,6 +110,7 @@ def test_apply_along_axis_invalid_axis(): class TestPutAlongAxis(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_put_along_axis_empty(self, xp, dtype): @@ -145,6 +147,7 @@ def test_indices_values_arr_diff_dims(self, dtype): ) ) class TestPutAlongAxes(unittest.TestCase): + def test_replace_max(self): arr = cupy.array([[10, 30, 20], [60, 40, 50]]) indices_max = cupy.argmax(arr, axis=self.axis, keepdims=True) @@ -156,6 +159,7 @@ def test_replace_max(self): class TestPutAlongAxisNone(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_axis_none(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/lib_tests/test_strided_tricks.py b/dpnp/tests/third_party/cupy/lib_tests/test_strided_tricks.py new file mode 100644 index 000000000000..eb8f73244b17 --- /dev/null +++ b/dpnp/tests/third_party/cupy/lib_tests/test_strided_tricks.py @@ -0,0 +1,134 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# from cupy.lib import stride_tricks + + +pytest.skip("stride tricks are not supported yet", allow_module_level=True) + + +class TestAsStrided(unittest.TestCase): + def test_as_strided(self): + a = cupy.array([1, 2, 3, 4]) + a_view = stride_tricks.as_strided( + a, shape=(2,), strides=(2 * a.itemsize,) + ) + expected = cupy.array([1, 3]) + testing.assert_array_equal(a_view, expected) + + a = cupy.array([1, 2, 3, 4]) + a_view = 
stride_tricks.as_strided( + a, shape=(3, 4), strides=(0, 1 * a.itemsize) + ) + expected = cupy.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + testing.assert_array_equal(a_view, expected) + + @testing.numpy_cupy_array_equal() + def test_rolling_window(self, xp): + a = testing.shaped_arange((3, 4), xp) + a_rolling = rolling_window(a, 2, 0) + return a_rolling + + +class TestSlidingWindowView(unittest.TestCase): + @testing.numpy_cupy_array_equal() + def test_1d(self, xp): + arr = testing.shaped_arange((3, 4), xp) + window_size = 2 + arr_view = xp.lib.stride_tricks.sliding_window_view(arr, window_size, 0) + assert arr_view.strides == (16, 4, 16) + return arr_view + + @testing.numpy_cupy_array_equal() + def test_2d(self, xp): + arr = testing.shaped_arange((3, 4), xp) + window_shape = (2, 2) + arr_view = xp.lib.stride_tricks.sliding_window_view( + arr, window_shape=window_shape + ) + assert arr_view.strides == (16, 4, 16, 4) + return arr_view + + @testing.numpy_cupy_array_equal() + def test_2d_with_axis(self, xp): + arr = testing.shaped_arange((3, 4), xp) + window_shape = 3 + axis = 1 + arr_view = xp.lib.stride_tricks.sliding_window_view( + arr, window_shape, axis + ) + assert arr_view.strides == (16, 4, 4) + return arr_view + + @testing.numpy_cupy_array_equal() + def test_2d_multi_axis(self, xp): + arr = testing.shaped_arange((3, 4), xp) + window_shape = (2, 3) + axis = (0, 1) + arr_view = xp.lib.stride_tricks.sliding_window_view( + arr, window_shape, axis + ) + assert arr_view.strides == (16, 4, 16, 4) + return arr_view + + def test_0d(self): + for xp in (numpy, cupy): + # Create a 0-D array (scalar) for testing + arr = xp.array(42) + # Sliding window with window size 1 + window_size = 1 + + # Test if the correct ValueError is raised! 
+ with pytest.raises(ValueError, match="axis 0 is out of bounds"): + xp.lib.stride_tricks.sliding_window_view(arr, window_size, 0) + + def test_window_shape_axis_length_mismatch(self): + for xp in (numpy, cupy): + x = xp.arange(24).reshape((2, 3, 4)) + window_shape = (2, 2) + axis = None + + # Test if ValueError is raised when len(window_shape) != len(axis) + with pytest.raises(ValueError, match="Since axis is `None`"): + xp.lib.stride_tricks.sliding_window_view(x, window_shape, axis) + + @testing.numpy_cupy_array_equal() + def test_arraylike_input(self, xp): + x = [0.0, 1.0, 2.0, 3.0, 4.0] + arr_view = xp.lib.stride_tricks.sliding_window_view(x, 2) + assert arr_view.strides == (8, 8) + return arr_view + + def test_writeable_views_not_supported(self): + x = cupy.arange(24).reshape((2, 3, 4)) + window_shape = (2, 2) + axis = None + writeable = True + + with self.assertRaises(NotImplementedError): + stride_tricks.sliding_window_view( + x, window_shape, axis, writeable=writeable + ) + + +def rolling_window(a, window, axis=-1): + """ + Make an ndarray with a rolling window along axis. + This function is taken from https://github.com/numpy/numpy/pull/31 + but slightly modified to accept axis option. 
+ """ + a = numpy.swapaxes(a, axis, -1) + shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) + strides = a.strides + (a.strides[-1],) + if isinstance(a, numpy.ndarray): + rolling = numpy.lib.stride_tricks.as_strided( + a, shape=shape, strides=strides + ) + elif isinstance(a, cupy.ndarray): + rolling = stride_tricks.as_strided(a, shape=shape, strides=strides) + return rolling.swapaxes(-2, axis) diff --git a/dpnp/tests/third_party/cupy/linalg_tests/test_decomposition.py b/dpnp/tests/third_party/cupy/linalg_tests/test_decomposition.py index acc140e37195..9948f4d0a920 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_decomposition.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_decomposition.py @@ -27,7 +27,7 @@ def random_matrix(shape, dtype, scale, sym=False): if dtype.kind in "u": assert sym, ( "generating nonsymmetric matrix with uint cells is not" - " supported." + " supported" ) # (singular value of numpy.ones((m, n))) <= \sqrt{mn} high_s = bias = high_s / (1 + numpy.sqrt(m * n)) @@ -57,6 +57,7 @@ def stacked_identity(xp, batch_shape, n, dtype): class TestCholeskyDecomposition: + @testing.numpy_cupy_allclose(atol=1e-3, type_check=has_support_aspect64()) def check_L(self, array, xp): a = xp.asarray(array) @@ -126,6 +127,7 @@ def test_empty(self, shape, xp, dtype): class TestCholeskyInvalid(unittest.TestCase): + def check_L(self, array): for xp in (numpy, cupy): a = xp.asarray(array) @@ -150,6 +152,75 @@ def test_decomposition(self, dtype): self.check_L(A) +@testing.parameterize( + *testing.product( + { + "mode": ["r", "raw", "complete", "reduced"], + } + ) +) +class TestQRDecomposition(unittest.TestCase): + + @testing.for_dtypes("fdFD") + def check_mode(self, array, mode, dtype): + a_cpu = numpy.asarray(array, dtype=dtype) + a_gpu = cupy.asarray(array, dtype=dtype) + result_gpu = cupy.linalg.qr(a_gpu, mode=mode) + if ( + mode != "raw" + or numpy.lib.NumpyVersion(numpy.__version__) >= "1.22.0rc1" + ): + result_cpu = numpy.linalg.qr(a_cpu, 
mode=mode) + self._check_result(result_cpu, result_gpu) + + def _check_result(self, result_cpu, result_gpu): + if isinstance(result_cpu, tuple): + for b_cpu, b_gpu in zip(result_cpu, result_gpu): + assert b_cpu.dtype == b_gpu.dtype + testing.assert_allclose(b_cpu, b_gpu, atol=1e-4) + else: + assert result_cpu.dtype == result_gpu.dtype + testing.assert_allclose(result_cpu, result_gpu, atol=1e-4) + + @testing.fix_random() + @_condition.repeat(3, 10) + def test_mode(self): + self.check_mode(numpy.random.randn(2, 4), mode=self.mode) + self.check_mode(numpy.random.randn(3, 3), mode=self.mode) + self.check_mode(numpy.random.randn(5, 4), mode=self.mode) + + @testing.with_requires("numpy>=1.22") + @testing.fix_random() + def test_mode_rank3(self): + self.check_mode(numpy.random.randn(3, 2, 4), mode=self.mode) + self.check_mode(numpy.random.randn(4, 3, 3), mode=self.mode) + self.check_mode(numpy.random.randn(2, 5, 4), mode=self.mode) + + @testing.with_requires("numpy>=1.22") + @testing.fix_random() + def test_mode_rank4(self): + self.check_mode(numpy.random.randn(2, 3, 2, 4), mode=self.mode) + self.check_mode(numpy.random.randn(2, 4, 3, 3), mode=self.mode) + self.check_mode(numpy.random.randn(2, 2, 5, 4), mode=self.mode) + + @testing.with_requires("numpy>=1.16") + def test_empty_array(self): + self.check_mode(numpy.empty((0, 3)), mode=self.mode) + self.check_mode(numpy.empty((3, 0)), mode=self.mode) + + @testing.with_requires("numpy>=1.22") + def test_empty_array_rank3(self): + self.check_mode(numpy.empty((0, 3, 2)), mode=self.mode) + self.check_mode(numpy.empty((3, 0, 2)), mode=self.mode) + self.check_mode(numpy.empty((3, 2, 0)), mode=self.mode) + self.check_mode(numpy.empty((0, 3, 3)), mode=self.mode) + self.check_mode(numpy.empty((3, 0, 3)), mode=self.mode) + self.check_mode(numpy.empty((3, 3, 0)), mode=self.mode) + self.check_mode(numpy.empty((0, 2, 3)), mode=self.mode) + self.check_mode(numpy.empty((2, 0, 3)), mode=self.mode) + self.check_mode(numpy.empty((2, 3, 0)), 
mode=self.mode) + + @testing.parameterize( *testing.product( { @@ -159,6 +230,7 @@ def test_decomposition(self, dtype): ) @testing.fix_random() class TestSVD(unittest.TestCase): + def setUp(self): self.seed = testing.generate_seed() @@ -196,7 +268,6 @@ def check_usv(self, shape, dtype): # reconstruct the matrix k = s_cpu.shape[-1] - if len(shape) == 2: if self.full_matrices: a_gpu_usv = cupy.dot(u_gpu[:, :k] * s_gpu, vh_gpu[:k, :]) @@ -238,9 +309,7 @@ def check_usv(self, shape, dtype): ] ) @testing.numpy_cupy_allclose( - rtol=1e-5, - atol=1e-4, - type_check=has_support_aspect64(), + rtol=1e-5, atol=1e-4, type_check=has_support_aspect64() ) def check_singular(self, shape, xp, dtype): array = testing.shaped_random(shape, xp, dtype=dtype, seed=self.seed) @@ -286,7 +355,7 @@ def test_svd_rank3(self): self.check_usv((2, 4, 4)) self.check_usv((2, 7, 3)) self.check_usv((2, 4, 3)) - self.check_usv((2, 32, 32)) + self.check_usv((2, 32, 32)) # still use _gesvdj_batched @_condition.repeat(3, 10) def test_svd_rank3_loop(self): @@ -342,7 +411,7 @@ def test_svd_rank4(self): self.check_usv((2, 2, 4, 4)) self.check_usv((2, 2, 7, 3)) self.check_usv((2, 2, 4, 3)) - self.check_usv((2, 2, 32, 32)) + self.check_usv((2, 2, 32, 32)) # still use _gesvdj_batched @_condition.repeat(3, 10) def test_svd_rank4_loop(self): @@ -371,71 +440,3 @@ def test_svd_rank4_empty_array(self): self.check_usv((0, 2, 3, 4)) self.check_usv((1, 2, 0, 4)) self.check_usv((1, 2, 3, 0)) - - -@testing.parameterize( - *testing.product( - { - "mode": ["r", "raw", "complete", "reduced"], - } - ) -) -class TestQRDecomposition(unittest.TestCase): - @testing.for_dtypes("fdFD") - def check_mode(self, array, mode, dtype): - a_cpu = numpy.asarray(array, dtype=dtype) - a_gpu = cupy.asarray(array, dtype=dtype) - result_gpu = cupy.linalg.qr(a_gpu, mode=mode) - if ( - mode != "raw" - or numpy.lib.NumpyVersion(numpy.__version__) >= "1.22.0rc1" - ): - result_cpu = numpy.linalg.qr(a_cpu, mode=mode) - self._check_result(result_cpu, 
result_gpu) - - def _check_result(self, result_cpu, result_gpu): - if isinstance(result_cpu, tuple): - for b_cpu, b_gpu in zip(result_cpu, result_gpu): - assert b_cpu.dtype == b_gpu.dtype - testing.assert_allclose(b_cpu, b_gpu, atol=1e-4) - else: - assert result_cpu.dtype == result_gpu.dtype - testing.assert_allclose(result_cpu, result_gpu, atol=1e-4) - - @testing.fix_random() - @_condition.repeat(3, 10) - def test_mode(self): - self.check_mode(numpy.random.randn(2, 4), mode=self.mode) - self.check_mode(numpy.random.randn(3, 3), mode=self.mode) - self.check_mode(numpy.random.randn(5, 4), mode=self.mode) - - @testing.with_requires("numpy>=1.22") - @testing.fix_random() - def test_mode_rank3(self): - self.check_mode(numpy.random.randn(3, 2, 4), mode=self.mode) - self.check_mode(numpy.random.randn(4, 3, 3), mode=self.mode) - self.check_mode(numpy.random.randn(2, 5, 4), mode=self.mode) - - @testing.with_requires("numpy>=1.22") - @testing.fix_random() - def test_mode_rank4(self): - self.check_mode(numpy.random.randn(2, 3, 2, 4), mode=self.mode) - self.check_mode(numpy.random.randn(2, 4, 3, 3), mode=self.mode) - self.check_mode(numpy.random.randn(2, 2, 5, 4), mode=self.mode) - - @testing.with_requires("numpy>=1.16") - def test_empty_array(self): - self.check_mode(numpy.empty((0, 3)), mode=self.mode) - self.check_mode(numpy.empty((3, 0)), mode=self.mode) - - @testing.with_requires("numpy>=1.22") - def test_empty_array_rank3(self): - self.check_mode(numpy.empty((0, 3, 2)), mode=self.mode) - self.check_mode(numpy.empty((3, 0, 2)), mode=self.mode) - self.check_mode(numpy.empty((3, 2, 0)), mode=self.mode) - self.check_mode(numpy.empty((0, 3, 3)), mode=self.mode) - self.check_mode(numpy.empty((3, 0, 3)), mode=self.mode) - self.check_mode(numpy.empty((3, 3, 0)), mode=self.mode) - self.check_mode(numpy.empty((0, 2, 3)), mode=self.mode) - self.check_mode(numpy.empty((2, 0, 3)), mode=self.mode) - self.check_mode(numpy.empty((2, 3, 0)), mode=self.mode) diff --git 
a/dpnp/tests/third_party/cupy/linalg_tests/test_eigenvalue.py b/dpnp/tests/third_party/cupy/linalg_tests/test_eigenvalue.py index 60c8db40ba0f..23a5edef63b4 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_eigenvalue.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_eigenvalue.py @@ -21,6 +21,7 @@ def _get_hermitian(xp, a, UPLO): ) ) class TestEigenvalue: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose( rtol=1e-3, @@ -47,9 +48,7 @@ def test_eigh(self, xp, dtype): tol = 1e-3 else: tol = 1e-5 - testing.assert_allclose(A @ v, v @ xp.diag(w), atol=tol, rtol=tol) - # Check if v @ vt is an identity matrix testing.assert_allclose( v @ v.swapaxes(-2, -1).conj(), @@ -87,7 +86,7 @@ def test_eigh_batched(self, xp, dtype): ) return w - @testing.for_complex_dtypes() + @testing.for_dtypes("FD") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4) def test_eigh_complex_batched(self, xp, dtype): a = xp.array( @@ -105,7 +104,6 @@ def test_eigh_complex_batched(self, xp, dtype): # eigenvectors, so v's are not directly comparable and we verify # them through the eigen equation A*v=w*v. 
A = _get_hermitian(xp, a, self.UPLO) - for i in range(a.shape[0]): testing.assert_allclose( A[i].dot(v[i]), w[i] * v[i], rtol=1e-5, atol=1e-5 @@ -165,44 +163,54 @@ def test_eigvalsh_complex_batched(self, xp, dtype): return w -@testing.parameterize( - *testing.product( - {"UPLO": ["U", "L"], "shape": [(0, 0), (2, 0, 0), (0, 3, 3)]} - ) +@pytest.mark.parametrize("UPLO", ["U", "L"]) +@pytest.mark.parametrize( + "shape", + [ + (0, 0), + (2, 0, 0), + (0, 3, 3), + ], ) class TestEigenvalueEmpty: + @testing.for_dtypes("ifdFD") @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) - def test_eigh(self, xp, dtype): - a = xp.empty(self.shape, dtype=dtype) + def test_eigh(self, xp, dtype, shape, UPLO): + a = xp.empty(shape, dtype=dtype) assert a.size == 0 - return xp.linalg.eigh(a, UPLO=self.UPLO) + return xp.linalg.eigh(a, UPLO=UPLO) @testing.for_dtypes("ifdFD") @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) - def test_eigvalsh(self, xp, dtype): - a = xp.empty(self.shape, dtype=dtype) + def test_eigvalsh(self, xp, dtype, shape, UPLO): + a = xp.empty(shape, dtype=dtype) assert a.size == 0 - return xp.linalg.eigvalsh(a, UPLO=self.UPLO) - - -@testing.parameterize( - *testing.product( - { - "UPLO": ["U", "L"], - "shape": [(), (3,), (2, 3), (4, 0), (2, 2, 3), (0, 2, 3)], - } - ) + return xp.linalg.eigvalsh(a, UPLO=UPLO) + + +@pytest.mark.parametrize("UPLO", ["U", "L"]) +@pytest.mark.parametrize( + "shape", + [ + (), + (3,), + (2, 3), + (4, 0), + (2, 2, 3), + (0, 2, 3), + ], ) class TestEigenvalueInvalid: - def test_eigh_shape_error(self): + + def test_eigh_shape_error(self, UPLO, shape): for xp in (numpy, cupy): - a = xp.zeros(self.shape) + a = xp.zeros(shape) with pytest.raises(xp.linalg.LinAlgError): - xp.linalg.eigh(a, self.UPLO) + xp.linalg.eigh(a, UPLO) - def test_eigvalsh_shape_error(self): + def test_eigvalsh_shape_error(self, UPLO, shape): for xp in (numpy, cupy): - a = xp.zeros(self.shape) + a = xp.zeros(shape) with 
pytest.raises(xp.linalg.LinAlgError): - xp.linalg.eigvalsh(a, self.UPLO) + xp.linalg.eigvalsh(a, UPLO) diff --git a/dpnp/tests/third_party/cupy/linalg_tests/test_einsum.py b/dpnp/tests/third_party/cupy/linalg_tests/test_einsum.py index 8b0510e8a61a..0714fbc05a72 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_einsum.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_einsum.py @@ -31,8 +31,8 @@ def _rand1_shape(shape, prob): def augment_einsum_testcases(*params): """Modify shapes in einsum tests - Shape parameter should be starts with "shape_". - The original parameter is stored as "_raw_params". + Shape parameter should be starts with 'shape_'. + The original parameter is stored as '_raw_params'. Args: params (sequence of dicts) @@ -61,6 +61,7 @@ def augment_einsum_testcases(*params): class TestEinSumError: + def test_irregular_ellipsis1(self): for xp in (numpy, cupy): with pytest.raises(ValueError): @@ -233,6 +234,7 @@ def test_invalid_arrow4(self): class TestListArgEinSumError: + @testing.with_requires("numpy>=1.19") def test_invalid_sub1(self): for xp in (numpy, cupy): @@ -338,6 +340,7 @@ def test_numpy_15961_list(self, xp, do_opt): ) ) class TestEinSumUnaryOperation: + @testing.for_all_dtypes(no_bool=False) @testing.numpy_cupy_allclose( rtol={numpy.float16: 1e-1, "default": 1e-7}, contiguous_check=False @@ -350,13 +353,15 @@ def test_einsum_unary(self, xp, dtype): testing.assert_allclose(optimized_out, out) return out - @pytest.mark.skip("view is not supported") @testing.for_all_dtypes(no_bool=False) @testing.numpy_cupy_equal() def test_einsum_unary_views(self, xp, dtype): a = testing.shaped_arange(self.shape_a, xp, dtype) b = xp.einsum(self.subscripts, a) - + if xp is cupy: + return ( + b.ndim == 0 or b.get_array()._pointer == a.get_array()._pointer + ) return b.ndim == 0 or b.base is a @testing.for_all_dtypes_combination( @@ -373,13 +378,13 @@ def test_einsum_unary_dtype(self, xp, dtype_a, dtype_out): class TestEinSumUnaryOperationWithScalar: - 
@pytest.mark.skip("All operands are scalar.") + @pytest.mark.skip("Scalar input is not supported") @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_scalar_int(self, xp, dtype): return xp.asarray(xp.einsum("->", 2, dtype=dtype)) - @pytest.mark.skip("All operands are scalar.") + @pytest.mark.skip("Scalar input is not supported") @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_scalar_float(self, xp, dtype): @@ -574,7 +579,7 @@ def test_einsum_ternary(self, xp, dtype_a, dtype_b, dtype_c): if xp is not numpy: # Avoid numpy issues #11059, #11060 for optimize in [ - True, # "greedy" + True, # 'greedy' "optimal", ["einsum_path", (0, 1), (0, 1)], ["einsum_path", (0, 2), (0, 1)], @@ -616,6 +621,7 @@ def test_einsum_ternary(self, xp, dtype_a, dtype_b, dtype_c): ) ) class TestEinSumLarge: + chars = "abcdefghij" sizes = (2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3) size_dict = {} @@ -638,7 +644,7 @@ def test_einsum(self, xp, shapes): ] # TODO(kataoka): support memory efficient cupy.einsum with warnings.catch_warnings(record=True) as ws: - # I hope there"s no problem with np.einsum for these cases... + # I hope there's no problem with np.einsum for these cases... 
out = xp.einsum(self.subscript, *arrays, optimize=self.opt) if xp is not numpy and isinstance( self.opt, tuple diff --git a/dpnp/tests/third_party/cupy/linalg_tests/test_norms.py b/dpnp/tests/third_party/cupy/linalg_tests/test_norms.py index 786428707a87..105dc2184a82 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_norms.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_norms.py @@ -58,6 +58,7 @@ def test_external_trace(self, xp, dtype): ) ) class TestNorm(unittest.TestCase): + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4, type_check=False) # since dtype of sum is different in dpnp and NumPy, type_check=False @@ -88,6 +89,7 @@ def test_norm(self, xp, dtype): ) ) class TestMatrixRank(unittest.TestCase): + @testing.for_all_dtypes(no_float16=True, no_complex=True) @testing.numpy_cupy_array_equal(type_check=True) def test_matrix_rank(self, xp, dtype): @@ -103,6 +105,7 @@ def test_matrix_rank(self, xp, dtype): class TestDet(unittest.TestCase): + @testing.for_float_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4) def test_det(self, xp, dtype): @@ -179,6 +182,7 @@ def test_det_singular(self, xp, dtype): class TestSlogdet(unittest.TestCase): + @testing.for_dtypes("fdFD") @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4) def test_slogdet(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/linalg_tests/test_product.py b/dpnp/tests/third_party/cupy/linalg_tests/test_product.py index 7063153b447b..a712c1cb0328 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_product.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_product.py @@ -1,4 +1,6 @@ +import sys import unittest +import warnings import numpy import pytest @@ -37,6 +39,7 @@ ) ) class TestDot(unittest.TestCase): + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_dot(self, xp, dtype_a, dtype_b): @@ -87,22 +90,46 @@ def 
test_dot_with_out(self, xp, dtype_a, dtype_b, dtype_c): # Test for 0 dimension ((3,), (3,), -1, -1, -1), # Test for basic cases - ((1, 2), (1, 2), -1, -1, 1), ((1, 3), (1, 3), 1, -1, -1), + # Test for higher dimensions + ((2, 4, 5, 3), (2, 4, 5, 3), -1, -1, 0), + ], + } + ) +) +class TestCrossProduct(unittest.TestCase): + + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) + @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + def test_cross(self, xp, dtype_a, dtype_b): + if dtype_a == dtype_b == numpy.bool_: + # cross does not support bool-bool inputs. + return xp.array(True) + shape_a, shape_b, axisa, axisb, axisc = self.params + a = testing.shaped_arange(shape_a, xp, dtype_a) + b = testing.shaped_arange(shape_b, xp, dtype_b) + return xp.cross(a, b, axisa, axisb, axisc) + + +# XXX: cross with 2D vectors is deprecated in NumPy 2.0, also CuPy 1.14 +@testing.parameterize( + *testing.product( + { + "params": [ + # Test for basic cases + ((1, 2), (1, 2), -1, -1, 1), ((1, 2), (1, 3), -1, -1, 1), ((2, 2), (1, 3), -1, -1, 0), ((3, 3), (1, 2), 0, -1, -1), ((0, 3), (0, 3), -1, -1, -1), # Test for higher dimensions ((2, 0, 3), (2, 0, 3), 0, 0, 0), - ((2, 4, 5, 3), (2, 4, 5, 3), -1, -1, 0), ((2, 4, 5, 2), (2, 4, 5, 2), 0, 0, -1), ], } ) ) -class TestCrossProduct(unittest.TestCase): - @pytest.mark.filterwarnings("ignore::DeprecationWarning") +class TestCrossProductDeprecated(unittest.TestCase): @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_cross(self, xp, dtype_a, dtype_b): @@ -112,7 +139,48 @@ def test_cross(self, xp, dtype_a, dtype_b): shape_a, shape_b, axisa, axisb, axisc = self.params a = testing.shaped_arange(shape_a, xp, dtype_a) b = testing.shaped_arange(shape_b, xp, dtype_b) - return xp.cross(a, b, axisa, axisb, axisc) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + res = xp.cross(a, b, axisa, axisb, axisc) + return 
res + + +@testing.parameterize( + *testing.product( + { + "params": [ + # Test for 0 dimension + ( + (3,), + (3,), + -1, + ), + # Test for basic cases + ( + (1, 3), + (1, 3), + 1, + ), + # Test for higher dimensions + ((2, 4, 5, 3), (2, 4, 5, 3), -1), + ], + } + ) +) +class TestLinalgCrossProduct(unittest.TestCase): + + @testing.with_requires("numpy>=2.0") + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) + @testing.numpy_cupy_allclose() + def test_cross(self, xp, dtype_a, dtype_b): + if dtype_a == dtype_b == numpy.bool_: + # cross does not support bool-bool inputs. + return xp.array(True) + shape_a, shape_b, axis = self.params + a = testing.shaped_arange(shape_a, xp, dtype_a) + b = testing.shaped_arange(shape_b, xp, dtype_b) + return xp.linalg.cross(a, b, axis=axis) @testing.parameterize( @@ -129,6 +197,7 @@ def test_cross(self, xp, dtype_a, dtype_b): ) ) class TestDotFor0Dim(unittest.TestCase): + @testing.for_all_dtypes_combination(["dtype_a", "dtype_b"]) @testing.numpy_cupy_allclose( type_check=has_support_aspect64(), contiguous_check=False @@ -147,6 +216,7 @@ def test_dot(self, xp, dtype_a, dtype_b): class TestProduct: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_dot_vec1(self, xp, dtype): @@ -403,7 +473,9 @@ def test_zerodim_kron(self, xp, dtype): ) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_kron_accepts_numbers_as_arguments(self, a, b, xp): - args = [xp.array(arg) if type(arg) == list else arg for arg in [a, b]] + args = [ + xp.array(arg) if isinstance(arg, list) else arg for arg in [a, b] + ] return xp.kron(*args) @@ -422,6 +494,7 @@ def test_kron_accepts_numbers_as_arguments(self, a, b, xp): ) ) class TestProductZeroLength(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_tensordot_zero_length(self, xp, dtype): @@ -488,9 +561,13 @@ def test_matrix_power_large(self, xp, dtype): a = xp.eye(23, k=17, dtype=dtype) + xp.eye(23, k=-6, dtype=dtype) return 
xp.linalg.matrix_power(a, 123456789123456789) + @pytest.mark.skipif( + sys.platform == "win32", reason="python int overflows C long" + ) @testing.for_float_dtypes(no_float16=True) @testing.numpy_cupy_allclose() def test_matrix_power_invlarge(self, xp, dtype): + # TODO (ev-br): np 2.0: check if it's fixed in numpy 2 (broken on 1.26) a = xp.eye(23, k=17, dtype=dtype) + xp.eye(23, k=-6, dtype=dtype) return xp.linalg.matrix_power(a, -987654321987654321) @@ -504,6 +581,7 @@ def test_matrix_power_invlarge(self, xp, dtype): ) @pytest.mark.parametrize("n", [0, 5, -7]) class TestMatrixPowerBatched: + @testing.for_float_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=5e-5) def test_matrix_power_batched(self, xp, dtype, shape, n): diff --git a/dpnp/tests/third_party/cupy/linalg_tests/test_solve.py b/dpnp/tests/third_party/cupy/linalg_tests/test_solve.py index 5fb6533be33b..ecb13f3a1390 100644 --- a/dpnp/tests/third_party/cupy/linalg_tests/test_solve.py +++ b/dpnp/tests/third_party/cupy/linalg_tests/test_solve.py @@ -15,13 +15,14 @@ @testing.parameterize( *testing.product( { + # "batched_gesv_limit": [None, 0], "order": ["C", "F"], } ) ) @testing.fix_random() class TestSolve(unittest.TestCase): - # TODO: add get_batched_gesv_limit + # def setUp(self): # if self.batched_gesv_limit is not None: # self.old_limit = get_batched_gesv_limit() @@ -32,6 +33,7 @@ class TestSolve(unittest.TestCase): # set_batched_gesv_limit(self.old_limit) @testing.for_dtypes("ifdFD") + # TODO(kataoka): Fix contiguity @testing.numpy_cupy_allclose( atol=1e-3, contiguous_check=False, type_check=has_support_aspect64() ) @@ -71,6 +73,7 @@ def check_shape(self, a_shape, b_shape, error_types): # NumPy with OpenBLAS returns an empty array # while numpy with OneMKL raises LinAlgError @pytest.mark.skip("Undefined behavior") + @testing.numpy_cupy_allclose() def test_solve_singular_empty(self, xp): a = xp.zeros((3, 3)) # singular b = xp.empty((3, 0)) # nrhs = 0 @@ -94,10 +97,33 @@ def 
test_invalid_shape(self): self.check_shape((3, 3, 4), (3,), linalg_errors) self.check_shape((3, 3), (0,), value_errors) self.check_shape((0, 3, 4), (3,), linalg_errors) - # Not allowed since numpy 2.0 - self.check_shape((0, 2, 2), (0, 2), value_errors) - self.check_shape((2, 4, 4), (2, 4), value_errors) - self.check_shape((2, 3, 2, 2), (2, 3, 2), value_errors) + self.check_shape((3, 3), (), value_errors) + # Not allowed since numpy 2 + self.check_shape( + (0, 2, 2), + ( + 0, + 2, + ), + value_errors, + ) + self.check_shape( + (2, 4, 4), + ( + 2, + 4, + ), + value_errors, + ) + self.check_shape( + (2, 3, 2, 2), + ( + 2, + 3, + 2, + ), + value_errors, + ) @testing.parameterize( @@ -110,6 +136,7 @@ def test_invalid_shape(self): ) @testing.fix_random() class TestTensorSolve(unittest.TestCase): + @testing.for_dtypes("ifdFD") @testing.numpy_cupy_allclose(atol=0.02, type_check=has_support_aspect64()) def test_tensorsolve(self, xp, dtype): @@ -128,6 +155,7 @@ def test_tensorsolve(self, xp, dtype): ) ) class TestInv(unittest.TestCase): + @testing.for_dtypes("ifdFD") @_condition.retry(10) def check_x(self, a_shape, dtype): @@ -137,7 +165,6 @@ def check_x(self, a_shape, dtype): a_gpu_copy = a_gpu.copy() result_cpu = numpy.linalg.inv(a_cpu) result_gpu = cupy.linalg.inv(a_gpu) - assert_dtype_allclose(result_gpu, result_cpu) testing.assert_array_equal(a_gpu_copy, a_gpu) @@ -167,6 +194,7 @@ def test_invalid_shape(self): class TestInvInvalid(unittest.TestCase): + @testing.for_dtypes("ifdFD") def test_inv(self, dtype): for xp in (numpy, cupy): @@ -189,6 +217,7 @@ def test_batched_inv(self, dtype): class TestPinv(unittest.TestCase): + @testing.for_dtypes("ifdFD") @_condition.retry(10) def check_x(self, a_shape, rcond, dtype): @@ -231,6 +260,7 @@ def test_pinv_size_0(self): class TestLstsq: + @testing.for_dtypes("ifdFD") @testing.numpy_cupy_allclose(atol=1e-3, type_check=has_support_aspect64()) def check_lstsq_solution( @@ -309,20 +339,18 @@ def test_invalid_shapes(self): 
self.check_invalid_shapes((3, 3), (2, 2)) self.check_invalid_shapes((4, 3), (10, 3, 3)) - # dpnp.linalg.lstsq() does not raise a FutureWarning - # because dpnp did not have a previous implementation of dpnp.linalg.lstsq() - # and there is no need to get rid of old deprecated behavior as numpy did. - @pytest.mark.skip("No support of deprecated behavior") + @testing.with_requires("numpy>=2.0") @testing.for_float_dtypes(no_float16=True) @testing.numpy_cupy_allclose(atol=1e-3) - def test_warn_rcond(self, xp, dtype): + def test_nowarn_rcond(self, xp, dtype): a = testing.shaped_random((3, 3), xp, dtype) b = testing.shaped_random((3,), xp, dtype) - with testing.assert_warns(FutureWarning): - return xp.linalg.lstsq(a, b) + # FutureWarning is no longer emitted + return xp.linalg.lstsq(a, b) class TestTensorInv(unittest.TestCase): + @testing.for_dtypes("ifdFD") @_condition.retry(10) def check_x(self, a_shape, ind, dtype): diff --git a/dpnp/tests/third_party/cupy/logic_tests/test_content.py b/dpnp/tests/third_party/cupy/logic_tests/test_content.py index dc73551399b5..c4440b8eb7c9 100644 --- a/dpnp/tests/third_party/cupy/logic_tests/test_content.py +++ b/dpnp/tests/third_party/cupy/logic_tests/test_content.py @@ -6,13 +6,14 @@ class TestContent(unittest.TestCase): - @testing.for_dtypes("fd") + + @testing.for_dtypes("efFdD") @testing.numpy_cupy_array_equal() def check_unary_inf(self, name, xp, dtype): a = xp.array([-3, numpy.inf, -1, -numpy.inf, 0, 1, 2], dtype=dtype) return getattr(xp, name)(a) - @testing.for_dtypes("fd") + @testing.for_dtypes("efFdD") @testing.numpy_cupy_array_equal() def check_unary_nan(self, name, xp, dtype): a = xp.array( @@ -31,6 +32,7 @@ def test_isnan(self): class TestUfuncLike(unittest.TestCase): + @testing.numpy_cupy_array_equal() def check_unary(self, name, xp): a = xp.array([-3, xp.inf, -1, -xp.inf, 0, 1, 2, xp.nan]) diff --git a/dpnp/tests/third_party/cupy/logic_tests/test_ops.py b/dpnp/tests/third_party/cupy/logic_tests/test_ops.py index 
41fe9dfb618e..c97724c32bda 100644 --- a/dpnp/tests/third_party/cupy/logic_tests/test_ops.py +++ b/dpnp/tests/third_party/cupy/logic_tests/test_ops.py @@ -4,6 +4,7 @@ class TestOps(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/logic_tests/test_truth.py b/dpnp/tests/third_party/cupy/logic_tests/test_truth.py index 512e35a27763..db9c61426f48 100644 --- a/dpnp/tests/third_party/cupy/logic_tests/test_truth.py +++ b/dpnp/tests/third_party/cupy/logic_tests/test_truth.py @@ -1,6 +1,5 @@ -import unittest - import numpy +import pytest from dpnp.tests.third_party.cupy import testing @@ -39,7 +38,8 @@ def _calc_out_shape(shape, axis, keepdims): } ) ) -class TestAllAny(unittest.TestCase): +class TestAllAny: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_without_out(self, xp, dtype): @@ -71,7 +71,8 @@ def test_with_out(self, xp, dtype): } ) ) -class TestAllAnyWithNaN(unittest.TestCase): +class TestAllAnyWithNaN: + @testing.for_dtypes((*testing._loops._float_dtypes, numpy.bool_)) @testing.numpy_cupy_array_equal() def test_without_out(self, xp, dtype): @@ -86,3 +87,205 @@ def test_with_out(self, xp, dtype): out = xp.empty(out_shape, dtype=x.dtype) getattr(xp, self.f)(x, self.axis, out, self.keepdims) return out + + +@pytest.mark.skip("isin() is not supported yet") +@testing.parameterize( + *testing.product( + { + "shape_x": [(0,), (3,), (2, 3), (2, 1, 3), (2, 0, 1), (2, 0, 1, 1)], + "shape_y": [(0,), (3,), (2, 3), (2, 1, 3), (2, 0, 1), (2, 0, 1, 1)], + "assume_unique": [False, True], + "invert": [False, True], + } + ) +) +class TestIn1DIsIn: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test(self, xp, dtype): + x = testing.shaped_arange(self.shape_x, xp, dtype) + y = testing.shaped_arange(self.shape_y, xp, dtype) + return xp.isin(x, y, self.assume_unique, self.invert) + + 
+@pytest.mark.skip("setdiff1d() is not supported yet") +class TestSetdiff1d: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_same_arrays(self, xp, dtype): + x = xp.array([1, 2, 3, 4, 5], dtype=dtype) + y = xp.array([1, 2, 3, 4, 5], dtype=dtype) + return xp.setdiff1d(x, y, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_diff_size_arr_inputs(self, xp, dtype): + x = xp.array([3, 4, 9, 1, 5, 4], dtype=dtype) + y = xp.array([8, 7, 3, 9, 0], dtype=dtype) + return xp.setdiff1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_diff_elements(self, xp, dtype): + x = xp.array([3, 4, 9, 1, 5, 4], dtype=dtype) + y = xp.array([8, 7, 3, 9, 0], dtype=dtype) + return xp.setdiff1d(x, y, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_with_2d(self, xp, dtype): + x = testing.shaped_random((2, 3), xp, dtype=dtype) + y = testing.shaped_random((3, 5), xp, dtype=dtype) + return xp.setdiff1d(x, y, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_with_duplicate_elements(self, xp, dtype): + x = xp.array([1, 2, 3, 2, 2, 6], dtype=dtype) + y = xp.array([3, 4, 2, 1, 1, 9], dtype=dtype) + return xp.setdiff1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_empty_arr(self, xp, dtype): + x = xp.array([], dtype=dtype) + y = xp.array([], dtype=dtype) + return xp.setdiff1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setdiff1d_more_dim(self, xp, dtype): + x = testing.shaped_arange((2, 3, 4, 8), xp, dtype=dtype) + y = testing.shaped_arange((5, 4, 2), xp, dtype=dtype) + return xp.setdiff1d(x, y, assume_unique=True) + + @testing.numpy_cupy_array_equal() + def test_setdiff1d_bool_val(self, xp): + x = xp.array([True, False, True]) + y = xp.array([False]) + return 
xp.setdiff1d(x, y) + + +@pytest.mark.skip("setxor1d() is not supported yet") +class TestSetxor1d: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_same_arrays(self, xp, dtype): + x = xp.array([1, 2, 3, 4, 5], dtype=dtype) + y = xp.array([1, 2, 3, 4, 5], dtype=dtype) + return xp.setxor1d(x, y, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_diff_size_arr_inputs(self, xp, dtype): + x = xp.array([3, 4, 9, 1, 5, 4], dtype=dtype) + y = xp.array([8, 7, 3, 9, 0], dtype=dtype) + return xp.setxor1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_diff_elements(self, xp, dtype): + x = xp.array([3, 4, 9, 1, 5, 4], dtype=dtype) + y = xp.array([8, 7, 3, 9, 0], dtype=dtype) + return xp.setxor1d(x, y, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_with_2d(self, xp, dtype): + x = testing.shaped_random((2, 3), xp, dtype=dtype) + y = testing.shaped_random((3, 5), xp, dtype=dtype) + return xp.setxor1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_with_duplicate_elements(self, xp, dtype): + x = xp.array([1, 2, 3, 2, 2, 6], dtype=dtype) + y = xp.array([3, 4, 2, 1, 1, 9], dtype=dtype) + return xp.setxor1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_empty_arr(self, xp, dtype): + x = xp.array([], dtype=dtype) + y = xp.array([], dtype=dtype) + return xp.setxor1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_setxor1d_more_dim(self, xp, dtype): + x = testing.shaped_arange((2, 3, 4, 8), xp, dtype=dtype) + y = testing.shaped_arange((5, 4, 2), xp, dtype=dtype) + return xp.setxor1d(x, y) + + @testing.numpy_cupy_array_equal() + def test_setxor1d_bool_val(self, xp): + x = xp.array([True, False, True]) + y = xp.array([False]) + return xp.setxor1d(x, y) + + 
+@pytest.mark.skip("intersect1d() is not supported yet") +class TestIntersect1d: + + @testing.for_all_dtypes(no_bool=True) + @testing.numpy_cupy_array_equal() + def test_one_dim_with_unique_values(self, xp, dtype): + a = xp.array([1, 2, 3, 4, 5], dtype=dtype) + b = xp.array([1, 2, 3, 4, 5], dtype=dtype) + return xp.intersect1d(a, b, assume_unique=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_with_random_val(self, xp, dtype): + a = xp.array([3, 4, 9, 1, 5, 4], dtype=dtype) + b = xp.array([8, 7, 3, 9, 0], dtype=dtype) + return xp.intersect1d(a, b) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_more_dim(self, xp, dtype): + a = testing.shaped_random((3, 4), xp, dtype=dtype) + b = testing.shaped_random((5, 2), xp, dtype=dtype) + return xp.intersect1d(a, b) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_return_indices(self, xp, dtype): + a = xp.array([2, 3, 4, 1, 9, 4], dtype=dtype) + b = xp.array([7, 5, 1, 2, 9, 3], dtype=dtype) + return xp.intersect1d(a, b, return_indices=True) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_multiple_instances(self, xp, dtype): + a = xp.array([2, 4, 5, 2, 1, 5], dtype=dtype) + b = xp.array([4, 6, 2, 5, 7, 6], dtype=dtype) + return xp.intersect1d(a, b, return_indices=True) + + +@pytest.mark.skip("union1d() is not supported yet") +class TestUnion1d: + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_union1d(self, xp, dtype): + x = xp.array([4, 1, 1, 1, 9, 9, 9], dtype=dtype) + y = xp.array([4, 0, 5, 2, 0, 0, 5], dtype=dtype) + return xp.union1d(x, y) + + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_union1d_2(self, xp, dtype): + x = testing.shaped_arange((5, 2), xp, dtype=dtype) + y = testing.shaped_arange((2, 3, 4), xp, dtype=dtype) + return xp.union1d(x, y) + + @testing.numpy_cupy_array_equal() + def test_union1d_3(self, xp): + x = xp.zeros((2, 2), 
dtype=xp.complex128) + y = xp.array([[1 + 1j, 2 + 3j], [4 + 1j, 0 + 7j]]) + return xp.union1d(x, y) diff --git a/dpnp/tests/third_party/cupy/logic_tests/test_type_test.py b/dpnp/tests/third_party/cupy/logic_tests/test_type_test.py index 599e0e16817a..ab1573a9b933 100644 --- a/dpnp/tests/third_party/cupy/logic_tests/test_type_test.py +++ b/dpnp/tests/third_party/cupy/logic_tests/test_type_test.py @@ -88,20 +88,19 @@ class TestTypeTestingFunctions(unittest.TestCase): def test(self, xp, dtype): return getattr(xp, self.func)(xp.ones(5, dtype=dtype)) - @pytest.mark.skip("support for scalar not implemented") + @pytest.mark.skip("Scalar input is not supported") @testing.for_all_dtypes() @testing.numpy_cupy_equal() def test_scalar(self, xp, dtype): return getattr(xp, self.func)(dtype(3)) - @pytest.mark.skip("support for list not implemented") + @pytest.mark.skip("List input is not supported") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_list(self, xp, dtype): - a = testing.shaped_arange((2, 3), xp, dtype) - if xp == cupy: - a = a.asnumpy() - return getattr(xp, self.func)(a.tolist()) + return getattr(xp, self.func)( + testing.shaped_arange((2, 3), xp, dtype).tolist() + ) @testing.parameterize( diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py index ab5550895065..34db4a1c8d71 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_add_remove.py @@ -13,6 +13,7 @@ class TestDelete(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_delete_with_no_axis(self, xp): arr = xp.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -59,6 +60,7 @@ def test_delete_with_indices_as_int(self, xp): class TestAppend(unittest.TestCase): + @testing.for_all_dtypes_combination( names=["dtype1", "dtype2"], no_bool=True ) @@ -102,8 +104,8 @@ def test_numpy_scalar_rhs(self, xp, dtype1, dtype2): 
scalar = xp.dtype(dtype2).type(10) return xp.append(xp.arange(20, dtype=dtype1), scalar) - @testing.numpy_cupy_array_equal() @pytest.mark.skip("Scalar input is not supported") + @testing.numpy_cupy_array_equal() def test_scalar_both(self, xp): return xp.append(10, 10) @@ -123,6 +125,7 @@ def test_empty(self, xp): class TestResize(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test(self, xp): return xp.resize(xp.arange(10), (10, 10)) @@ -160,6 +163,7 @@ def test_empty(self, xp): class TestUnique: + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_unique_no_axis(self, xp, dtype): @@ -174,58 +178,52 @@ def test_unique(self, xp, dtype): @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_index_no_axis(self, xp, dtype): + def test_unique_return_index_no_axis(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) return xp.unique(a, return_index=True)[1] @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_index(self, xp, dtype): + def test_unique_return_index(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) return xp.unique(a, return_index=True, axis=0)[1] + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_inverse_no_axis(self, xp, dtype): + def test_unique_return_inverse_no_axis(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) - result = xp.unique(a, return_inverse=True)[1] - if xp is numpy and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.1": - # gh-26961: numpy.unique(..., return_inverse=True, axis=None) - # returned flatten unique_inverse till 2.0.1 version - result = result.reshape(a.shape) - return result + return xp.unique(a, return_inverse=True)[1] + 
@testing.with_requires("numpy>=2.1") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_inverse(self, xp, dtype): + def test_unique_return_inverse(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) return xp.unique(a, return_inverse=True, axis=1)[1] @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_counts_no_axis(self, xp, dtype): + def test_unique_return_counts_no_axis(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) return xp.unique(a, return_counts=True)[1] @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() - def test_unique_counts(self, xp, dtype): + def test_unique_return_counts(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) return xp.unique(a, return_counts=True, axis=0)[1] + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_unique_return_all_no_axis(self, xp, dtype): a = testing.shaped_random((100, 100), xp, dtype) - result = xp.unique( + return xp.unique( a, return_index=True, return_inverse=True, return_counts=True ) - if xp is numpy and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.1": - # gh-26961: numpy.unique(..., return_inverse=True, axis=None) - # returned flatten unique_inverse till 2.0.1 version - result = result[:2] + (result[2].reshape(a.shape),) + result[3:] - return result + @testing.with_requires("numpy>=2.1") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_unique_return_all(self, xp, dtype): @@ -250,19 +248,16 @@ def test_unique_empty(self, xp, dtype): a = xp.empty((0,), dtype=dtype) return xp.unique(a, axis=0) + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) 
@testing.numpy_cupy_array_equal() def test_unique_empty_return_all_no_axis(self, xp, dtype): a = xp.empty((3, 0, 2), dtype=dtype) - result = xp.unique( + return xp.unique( a, return_index=True, return_inverse=True, return_counts=True ) - if xp is numpy and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.1": - # gh-26961: numpy.unique(..., return_inverse=True, axis=None) - # returned flatten unique_inverse till 2.0.1 version - result = result[:2] + (result[2].reshape(a.shape),) + result[3:] - return result + @testing.with_requires("numpy>=2.1") @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_unique_empty_return_all(self, xp, dtype): @@ -321,9 +316,47 @@ def test_unique_equal_nan(self, xp, dtype, equal_nan): ) return xp.unique(a, axis=1, equal_nan=equal_nan) + @pytest.mark.skip("unique_all() is not supported yet") + @testing.with_requires("numpy>=2.0") + @pytest.mark.parametrize( + "attr", ["values", "indices", "inverse_indices", "counts"] + ) + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_array_equal() + def test_unique_all(self, xp, dtype, attr): + a = testing.shaped_random((100, 100), xp, dtype) + return getattr(xp.unique_all(a), attr) + + @pytest.mark.skip("unique_counts() is not supported yet") + @testing.with_requires("numpy>=2.0") + @pytest.mark.parametrize("attr", ["values", "counts"]) + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_array_equal() + def test_unique_counts(self, xp, dtype, attr): + a = testing.shaped_random((100, 100), xp, dtype) + return getattr(xp.unique_counts(a), attr) + + @pytest.mark.skip("unique_inverse() is not supported yet") + @testing.with_requires("numpy>=2.0") + @pytest.mark.parametrize("attr", ["values", "inverse_indices"]) + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_array_equal() + def test_unique_inverse(self, xp, 
dtype, attr): + a = testing.shaped_random((100, 100), xp, dtype) + return getattr(xp.unique_inverse(a), attr) + + @pytest.mark.skip("unique_values() is not supported yet") + @testing.with_requires("numpy>=2.0") + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_array_equal() + def test_unique_values(self, xp, dtype): + a = testing.shaped_random((100, 100), xp, dtype) + return xp.unique_values(a) + @testing.parameterize(*testing.product({"trim": ["fb", "f", "b"]})) class TestTrim_zeros(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_trim_non_zeros(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py index 4cba30bdba78..4650e5130f1e 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_basic.py @@ -1,6 +1,6 @@ +import itertools import warnings -import dpctl import numpy import pytest @@ -14,6 +14,7 @@ class TestBasic: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_copyto(self, xp, dtype): @@ -95,6 +96,15 @@ def test_copyto_where(self, xp, dtype): xp.copyto(a, b, where=c) return a + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_copyto_where_squeeze_broadcast(self, xp, dtype): + a = testing.shaped_arange((2, 3, 4), xp, dtype) + b = testing.shaped_reverse_arange((1, 2, 1, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, "?") + xp.copyto(a, b, where=c) + return a + @pytest.mark.parametrize("shape", [(2, 3, 4), (0,)]) @testing.for_all_dtypes(no_bool=True) def test_copyto_where_raises(self, dtype, shape): @@ -105,38 +115,94 @@ def test_copyto_where_raises(self, dtype, shape): with pytest.raises(TypeError): xp.copyto(a, b, where=c) + def _check_copyto_where_multigpu_raises(self, dtype, ngpus): + def get_numpy(): + a = testing.shaped_arange((2, 3, 4), 
numpy, dtype) + b = testing.shaped_reverse_arange((2, 3, 4), numpy, dtype) + c = testing.shaped_arange((2, 3, 4), numpy, "?") + numpy.copyto(a, b, where=c) + return a + + for dev1, dev2, dev3, dev4 in itertools.product(*[range(ngpus)] * 4): + if dev1 == dev2 == dev3 == dev4: + continue + if not dev1 <= dev2 <= dev3 <= dev4: + continue + + with cuda.Device(dev1): + a = testing.shaped_arange((2, 3, 4), cupy, dtype) + with cuda.Device(dev2): + b = testing.shaped_reverse_arange((2, 3, 4), cupy, dtype) + with cuda.Device(dev3): + c = testing.shaped_arange((2, 3, 4), cupy, "?") + with cuda.Device(dev4): + if all( + [ + (peer == dev4) + or (cuda.runtime.deviceCanAccessPeer(dev4, peer) == 1) + for peer in (dev1, dev2, dev3) + ] + ): + with pytest.warns(cupy._util.PerformanceWarning): + cupy.copyto(a, b, where=c) + else: + with pytest.raises( + ValueError, match="Peer access is unavailable" + ): + cupy.copyto(a, b, where=c) + + @pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(2) @testing.for_all_dtypes() - def test_copyto_where_multidevice_raises(self, dtype): - a = testing.shaped_arange( - (2, 3, 4), cupy, dtype, device=dpctl.SyclQueue() - ) - b = testing.shaped_reverse_arange( - (2, 3, 4), cupy, dtype, device=dpctl.SyclQueue() - ) - c = testing.shaped_arange( - (2, 3, 4), cupy, "?", device=dpctl.SyclQueue() - ) - with pytest.raises( - dpctl.utils.ExecutionPlacementError, - match="arrays have different associated queues", - ): - cupy.copyto(a, b, where=c) + def test_copyto_where_multigpu_raises(self, dtype): + self._check_copyto_where_multigpu_raises(dtype, 2) + @pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(4) @testing.for_all_dtypes() - def test_copyto_noncontinguous(self, dtype): - src = testing.shaped_arange((2, 3, 4), cupy, dtype) - src = src.swapaxes(0, 1) + def test_copyto_where_multigpu_raises_4(self, dtype): + self._check_copyto_where_multigpu_raises(dtype, 4) - dst = cupy.empty_like(src) - cupy.copyto(dst, src) + 
@pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(6) + @testing.for_all_dtypes() + def test_copyto_where_multigpu_raises_6(self, dtype): + self._check_copyto_where_multigpu_raises(dtype, 6) + + @pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(2) + @testing.for_all_dtypes() + @testing.numpy_cupy_array_equal() + def test_copyto_multigpu(self, xp, dtype): + with cuda.Device(0): + a = testing.shaped_arange((2, 3, 4), xp, dtype) + with cuda.Device(1): + b = xp.empty((2, 3, 4), dtype=dtype) + xp.copyto(b, a) + return b + + @pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(2) + @testing.for_all_dtypes() + def test_copyto_multigpu_noncontinguous(self, dtype): + with cuda.Device(0): + src = testing.shaped_arange((2, 3, 4), cupy, dtype) + src = src.swapaxes(0, 1) + with cuda.Device(1): + dst = cupy.empty_like(src) + cupy.copyto(dst, src) expected = testing.shaped_arange((2, 3, 4), numpy, dtype) expected = expected.swapaxes(0, 1) - testing.assert_array_equal(expected, src) - testing.assert_array_equal(expected, dst) + testing.assert_array_equal(expected, src.get()) + testing.assert_array_equal(expected, dst.get()) +@pytest.mark.skipif( + numpy.__version__ < "2", + reason="XXX: NP2.0: copyto is in flux in numpy 2.0.0rc2", +) @testing.parameterize( *testing.product( { @@ -146,15 +212,16 @@ def test_copyto_noncontinguous(self, dtype): ) ) class TestCopytoFromScalar: + @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(accept_error=TypeError) + @testing.numpy_cupy_allclose(accept_error=(TypeError, OverflowError)) def test_copyto(self, xp, dtype): dst = xp.ones(self.dst_shape, dtype=dtype) xp.copyto(dst, self.src) return dst @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(accept_error=TypeError) + @testing.numpy_cupy_allclose(accept_error=(TypeError, OverflowError)) def test_copyto_where(self, xp, dtype): dst = xp.ones(self.dst_shape, dtype=dtype) mask = (testing.shaped_arange(self.dst_shape, xp, dtype) % 
2).astype( @@ -164,18 +231,15 @@ def test_copyto_where(self, xp, dtype): return dst +@testing.with_requires("numpy>=2.1") @pytest.mark.parametrize( "casting", ["no", "equiv", "safe", "same_kind", "unsafe"] ) class TestCopytoFromNumpyScalar: + @testing.for_all_dtypes_combination(("dtype1", "dtype2")) @testing.numpy_cupy_allclose(accept_error=TypeError) def test_copyto(self, xp, dtype1, dtype2, casting): - if casting == "safe": - pytest.skip( - "NEP50 doesn't work properly in numpy with casting='safe'" - ) - dst = xp.zeros((2, 3, 4), dtype=dtype1) src = numpy.array(1, dtype=dtype2) with warnings.catch_warnings(): @@ -192,7 +256,6 @@ def test_copyto(self, xp, dtype1, dtype2, casting): def test_copyto2(self, xp, make_src, dtype, casting): dst = xp.zeros((2, 3, 4), dtype=dtype) src = make_src(dtype) - with warnings.catch_warnings(): warnings.simplefilter("ignore", ComplexWarning) xp.copyto(dst, src, casting) @@ -201,11 +264,6 @@ def test_copyto2(self, xp, make_src, dtype, casting): @testing.for_all_dtypes_combination(("dtype1", "dtype2")) @testing.numpy_cupy_allclose(accept_error=TypeError) def test_copyto_where(self, xp, dtype1, dtype2, casting): - if casting == "safe": - pytest.skip( - "NEP50 doesn't work properly in numpy with casting='safe'" - ) - shape = (2, 3, 4) dst = xp.ones(shape, dtype=dtype1) src = numpy.array(1, dtype=dtype2) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py index 56d300447736..7355d07e1d9b 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py @@ -9,16 +9,18 @@ class TestDims(unittest.TestCase): + def check_atleast(self, func, xp): a = testing.shaped_arange((), xp) b = testing.shaped_arange((2,), xp) c = testing.shaped_arange((2, 2), xp) d = testing.shaped_arange((4, 3, 2), xp) - e = 1 - f = numpy.float32(1) - return func(a, b, c, d, e, f) + # scalar input is not supported + 
# e = 1 + # f = numpy.float32(1) + # return func(a, b, c, d, e, f) + return func(a, b, c, d) - @pytest.mark.skip(reason="Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_atleast_1d1(self, xp): return self.check_atleast(xp.atleast_1d, xp) @@ -28,7 +30,6 @@ def test_atleast_1d2(self, xp): a = testing.shaped_arange((1, 3, 2), xp) return xp.atleast_1d(a) - @pytest.mark.skip(reason="Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_atleast_2d1(self, xp): return self.check_atleast(xp.atleast_2d, xp) @@ -38,7 +39,6 @@ def test_atleast_2d2(self, xp): a = testing.shaped_arange((1, 3, 2), xp) return xp.atleast_2d(a) - @pytest.mark.skip(reason="Scalar input is not supported") @testing.numpy_cupy_array_equal() def test_atleast_3d1(self, xp): return self.check_atleast(xp.atleast_3d, xp) @@ -295,10 +295,12 @@ def test_external_squeeze(self, xp): {"shapes": [(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]}, ) class TestBroadcast(unittest.TestCase): + def _broadcast(self, xp, dtype, shapes): arrays = [testing.shaped_arange(s, xp, dtype) for s in shapes] return xp.broadcast(*arrays) + @pytest.mark.skip("broadcast() is not supported yet") @testing.for_all_dtypes() def test_broadcast(self, dtype): broadcast_np = self._broadcast(numpy, dtype, self.shapes) @@ -337,6 +339,8 @@ def test_broadcast_arrays(self, xp, dtype): {"shapes": [(0,), (2,)]}, ) class TestInvalidBroadcast(unittest.TestCase): + + @pytest.mark.skip("broadcast() is not supported yet") @testing.for_all_dtypes() def test_invalid_broadcast(self, dtype): for xp in (numpy, cupy): diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_join.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_join.py index 5e1cfe4647b1..f6e97e571391 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_join.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_join.py @@ -12,6 +12,7 @@ class TestJoin: + @testing.for_all_dtypes(name="dtype1") 
@testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) @@ -94,6 +95,20 @@ def test_concatenate_large_5(self, xp, dtype): b = testing.shaped_reverse_arange((2, 3, 4), xp, "i") return xp.concatenate((a, b) * 10, axis=-1) + @pytest.mark.skip("multi GPU is not supported") + @testing.multi_gpu(2) + def test_concatenate_large_different_devices(self): + arrs = [] + for i in range(10): + with cuda.Device(i % 2): + arrs.append(cupy.empty((2, 3, 4))) + if cuda.runtime.deviceCanAccessPeer(0, 1) == 1: + with pytest.warns(cupy._util.PerformanceWarning): + cupy.concatenate(arrs) + else: + with pytest.raises(ValueError): + cupy.concatenate(arrs) + @testing.for_all_dtypes(name="dtype") @testing.numpy_cupy_array_equal() def test_concatenate_f_contiguous(self, xp, dtype): @@ -112,14 +127,12 @@ def test_concatenate_large_f_contiguous(self, xp, dtype): e = testing.shaped_arange((2, 3, 2), xp, dtype) return xp.concatenate((a, b, c, d, e) * 2, axis=-1) - @pytest.mark.skip(reason="lead to crash due to reported issue in OCL RT") @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_concatenate_many_multi_dtype(self, xp): a = testing.shaped_arange((2, 1), xp, "i") b = testing.shaped_arange((2, 1), xp, "f") return xp.concatenate((a, b) * 1024, axis=1) - @pytest.mark.skip("dpnp.int8 is not supported yet") @testing.slow def test_concatenate_32bit_boundary(self): a = cupy.zeros((2**30,), dtype=cupy.int8) @@ -129,7 +142,7 @@ def test_concatenate_32bit_boundary(self): del b del ret # Free huge memory for slow test - cupy.get_default_memory_pool().free_all_blocks() + # cupy.get_default_memory_pool().free_all_blocks() def test_concatenate_wrong_ndim(self): a = cupy.empty((2, 3)) @@ -156,36 +169,40 @@ def test_concatenate_out(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_concatenate_out_same_kind(self, xp): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, 
xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) out = xp.zeros((3, 12), dtype=xp.float32) xp.concatenate((a, b, c), axis=1, out=out) return out def test_concatenate_out_invalid_shape(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) - out = xp.zeros((4, 10), dtype=xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) + out = xp.zeros((4, 10), dtype=dtype) with pytest.raises(ValueError): xp.concatenate((a, b, c), axis=1, out=out) def test_concatenate_out_invalid_shape_2(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) - out = xp.zeros((2, 2, 10), dtype=xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) + out = xp.zeros((2, 2, 10), dtype=dtype) with pytest.raises(ValueError): xp.concatenate((a, b, c), axis=1, out=out) def test_concatenate_out_invalid_dtype(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) out = xp.zeros((3, 12), dtype=xp.int64) with pytest.raises(TypeError): 
xp.concatenate((a, b, c), axis=1, out=out) @@ -216,19 +233,31 @@ def test_concatenate_dtype(self, xp, dtype1, dtype2): @testing.with_requires("numpy>=1.20.0") def test_concatenate_dtype_invalid_out(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_arange((3, 4), xp, xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_arange((3, 4), xp, dtype) out = xp.zeros((6, 4), dtype=xp.int64) with pytest.raises(TypeError): xp.concatenate((a, b), out=out, dtype=xp.int64) @testing.with_requires("numpy>=1.20.0") - @testing.for_castings() + # @pytest.mark.filterwarnings("error::cupy.exceptions.ComplexWarning") + @pytest.mark.parametrize( + "casting", + [ + "no", + "equiv", + "safe", + "same_kind", + "unsafe", + ], + ) @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_array_equal(accept_error=(TypeError, ComplexWarning)) def test_concatenate_casting(self, xp, dtype1, dtype2, casting): a = testing.shaped_arange((3, 4), xp, dtype1) b = testing.shaped_arange((3, 4), xp, dtype1) + # may raise TypeError or ComplexWarning return xp.concatenate((a, b), dtype=dtype2, casting=casting) @testing.numpy_cupy_array_equal() @@ -282,7 +311,17 @@ def test_hstack_dtype(self, xp, dtype1, dtype2): return xp.hstack((a, b), dtype=dtype2) @testing.with_requires("numpy>=1.24.0") - @testing.for_castings() + # @pytest.mark.filterwarnings("error::cupy.exceptions.ComplexWarning") + @pytest.mark.parametrize( + "casting", + [ + "no", + "equiv", + "safe", + "same_kind", + "unsafe", + ], + ) @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_array_equal(accept_error=(TypeError, ComplexWarning)) def test_hstack_casting(self, xp, dtype1, dtype2, casting): @@ -317,7 +356,17 @@ def test_vstack_dtype(self, xp, dtype1, dtype2): return xp.vstack((a, b), dtype=dtype2) @testing.with_requires("numpy>=1.24.0") - @testing.for_castings() + # 
@pytest.mark.filterwarnings("error::cupy.exceptions.ComplexWarning") + @pytest.mark.parametrize( + "casting", + [ + "no", + "equiv", + "safe", + "same_kind", + "unsafe", + ], + ) @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_array_equal(accept_error=(TypeError, ComplexWarning)) def test_vstack_casting(self, xp, dtype1, dtype2, casting): @@ -410,36 +459,40 @@ def test_stack_out(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_stack_out_same_kind(self, xp): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) out = xp.zeros((3, 3, 4), dtype=xp.float32) xp.stack((a, b, c), axis=1, out=out) return out def test_stack_out_invalid_shape(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) - out = xp.zeros((3, 3, 10), dtype=xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) + out = xp.zeros((3, 3, 10), dtype=dtype) with pytest.raises(ValueError): xp.stack((a, b, c), axis=1, out=out) def test_stack_out_invalid_shape_2(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) - out = xp.zeros((3, 3, 3, 10), dtype=xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) + out = 
xp.zeros((3, 3, 3, 10), dtype=dtype) with pytest.raises(ValueError): xp.stack((a, b, c), axis=1, out=out) def test_stack_out_invalid_dtype(self): for xp in (numpy, cupy): - a = testing.shaped_arange((3, 4), xp, xp.float32) - b = testing.shaped_reverse_arange((3, 4), xp, xp.float32) - c = testing.shaped_arange((3, 4), xp, xp.float32) + dtype = cupy.default_float_type() + a = testing.shaped_arange((3, 4), xp, dtype) + b = testing.shaped_reverse_arange((3, 4), xp, dtype) + c = testing.shaped_arange((3, 4), xp, dtype) out = xp.zeros((3, 3, 4), dtype=xp.int64) with pytest.raises(TypeError): xp.stack((a, b, c), axis=1, out=out) @@ -453,7 +506,17 @@ def test_stack_dtype(self, xp, dtype1, dtype2): return xp.stack((a, b), dtype=dtype2) @testing.with_requires("numpy>=1.24.0") - @testing.for_castings() + # @pytest.mark.filterwarnings("error::cupy.exceptions.ComplexWarning") + @pytest.mark.parametrize( + "casting", + [ + "no", + "equiv", + "safe", + "same_kind", + "unsafe", + ], + ) @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_array_equal(accept_error=(TypeError, ComplexWarning)) def test_stack_casting(self, xp, dtype1, dtype2, casting): @@ -462,6 +525,8 @@ def test_stack_casting(self, xp, dtype1, dtype2, casting): # may raise TypeError or ComplexWarning return xp.stack((a, b), dtype=dtype2, casting=casting) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(name="dtype1") @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) @@ -474,17 +539,17 @@ def test_row_stack(self, xp, dtype1, dtype2): def test_row_stack_wrong_ndim1(self): a = cupy.zeros(()) b = cupy.zeros((3,)) - with pytest.raises(ValueError): + with pytest.raises(ValueError): # pytest.warns(DeprecationWarning): cupy.row_stack((a, b)) def test_row_stack_wrong_ndim2(self): a = cupy.zeros((3, 2, 3)) b = cupy.zeros((3, 2)) - with pytest.raises(ValueError): + 
with pytest.raises(ValueError): # pytest.warns(DeprecationWarning): cupy.row_stack((a, b)) def test_row_stack_wrong_shape(self): a = cupy.zeros((3, 2)) b = cupy.zeros((4, 3)) - with pytest.raises(ValueError): + with pytest.raises(ValueError): # pytest.warns(DeprecationWarning): cupy.row_stack((a, b)) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_kind.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_kind.py index 7acf31b715c8..b327b91a5616 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_kind.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_kind.py @@ -4,11 +4,11 @@ import pytest import dpnp as cupy -from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing class TestKind(unittest.TestCase): + @testing.for_orders("CFAK") @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -28,22 +28,6 @@ def test_asarray_chkfinite_non_finite_vals(self, dtype, order): with pytest.raises(error): xp.asarray_chkfinite(a, dtype=dtype, order=order) - @testing.with_requires("numpy<2.0") - @testing.for_all_dtypes() - def test_asfarray(self, dtype): - a = cupy.asarray([1, 2, 3]) - a_gpu = cupy.asfarray(a, dtype) - a_cpu = numpy.asfarray(a, dtype) - if ( - has_support_aspect64() - or cupy.issubdtype(dtype, cupy.complexfloating) - or cupy.issubdtype(dtype, cupy.floating) - ): - assert a_cpu.dtype == a_gpu.dtype - else: - assert a_cpu.dtype == cupy.float64 - assert a_gpu.dtype == cupy.float32 - @testing.for_all_dtypes() def test_asfortranarray1(self, dtype): def func(xp): @@ -51,6 +35,9 @@ def func(xp): ret = xp.asfortranarray(x) assert x.flags.c_contiguous assert ret.flags.f_contiguous + if xp is cupy: + return tuple(el * ret.itemsize for el in ret.strides) + return ret.strides assert func(numpy) == func(cupy) @@ -61,6 +48,9 @@ def func(xp): ret = xp.asfortranarray(x) assert x.flags.c_contiguous assert ret.flags.f_contiguous + if xp is cupy: + return tuple(el * ret.itemsize for el in 
ret.strides) + return ret.strides assert func(numpy) == func(cupy) @@ -71,6 +61,9 @@ def func(xp): ret = xp.asfortranarray(xp.asfortranarray(x)) assert x.flags.c_contiguous assert ret.flags.f_contiguous + if xp is cupy: + return tuple(el * ret.itemsize for el in ret.strides) + return ret.strides assert func(numpy) == func(cupy) @@ -81,6 +74,9 @@ def func(xp): x = xp.transpose(x, (1, 0)) ret = xp.asfortranarray(x) assert ret.flags.f_contiguous + if xp is cupy: + return tuple(el * ret.itemsize for el in ret.strides) + return ret.strides assert func(numpy) == func(cupy) @@ -91,6 +87,9 @@ def func(xp): ret = xp.asfortranarray(x) assert x.flags.c_contiguous assert ret.flags.f_contiguous + if xp is cupy: + return tuple(el * ret.itemsize for el in ret.strides) + return ret.strides assert func(numpy) == func(cupy) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_rearrange.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_rearrange.py index 4225902c2685..7c97ab1c0ebf 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_rearrange.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_rearrange.py @@ -26,6 +26,7 @@ {"shape": (5, 2), "shift": (2, 1, 3), "axis": None}, ) class TestRoll(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_roll(self, xp, dtype): @@ -37,17 +38,27 @@ def test_roll(self, xp, dtype): def test_roll_cupy_shift(self, xp, dtype): x = testing.shaped_arange(self.shape, xp, dtype) shift = self.shift + if xp is cupy: + shift = cupy.array(shift) return xp.roll(x, shift, axis=self.axis) class TestRollTypeError(unittest.TestCase): - # TODO: update, once dpctl#1857 is resolved - @testing.with_requires("numpy<2.1.2") # done in numpy#27437 + + @pytest.mark.skip("castable string shift is not supported") + @testing.with_requires("numpy>=2.1.2") + def test_roll_invalid_shift_castable(self): + for xp in (numpy, cupy): + x = testing.shaped_arange((5, 2), xp) + # Weird but works due to `int` 
call + xp.roll(x, "0", axis=0) + + @testing.with_requires("numpy>=2.1.2") def test_roll_invalid_shift(self): for xp in (numpy, cupy): x = testing.shaped_arange((5, 2), xp) - with pytest.raises(TypeError): - xp.roll(x, "0", axis=0) + with pytest.raises((ValueError, TypeError)): + xp.roll(x, "a", axis=0) def test_roll_invalid_axis_type(self): for xp in (numpy, cupy): @@ -75,11 +86,14 @@ def test_roll_invalid_cupy_shift(self): for xp in (numpy, cupy): x = testing.shaped_arange(self.shape, xp) shift = self.shift + if xp is cupy: + shift = cupy.array(shift) with pytest.raises(ValueError): xp.roll(x, shift, axis=self.axis) class TestFliplr(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_fliplr_2(self, xp, dtype): @@ -101,6 +115,7 @@ def test_fliplr_insufficient_ndim(self, dtype): class TestFlipud(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_flipud_1(self, xp, dtype): @@ -122,6 +137,7 @@ def test_flipud_insufficient_ndim(self, dtype): class TestFlip(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_flip_1(self, xp, dtype): @@ -205,6 +221,7 @@ def test_flip_invalid_negative_axis(self, dtype): class TestRot90(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_rot90_none(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_shape.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_shape.py index b09a39c657d0..49f22951e4b8 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_shape.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_shape.py @@ -8,6 +8,7 @@ @pytest.mark.parametrize("shape", [(2, 3), (), (4,)]) class TestShape: + def test_shape(self, shape): for xp in (numpy, cupy): a = testing.shaped_arange(shape, xp) @@ -20,10 +21,13 @@ def test_shape_list(self, shape): class TestReshape: - def test_reshape_shapes(self): + + def 
test_reshape_strides(self): def func(xp): a = testing.shaped_arange((1, 1, 1, 2, 2), xp) - return a.shape + if xp is cupy: + return tuple(el * a.itemsize for el in a.strides) + return a.strides assert func(numpy) == func(cupy) @@ -98,7 +102,10 @@ def test_reshape_zerosize_invalid_unknown(self): def test_reshape_zerosize(self, xp): a = xp.zeros((0,)) b = a.reshape((0,)) - # assert b.base is a + if xp is cupy: + assert a.get_array()._pointer == b.get_array()._pointer + else: + assert b.base is a return b @testing.for_orders("CFA") @@ -106,7 +113,10 @@ def test_reshape_zerosize(self, xp): def test_reshape_zerosize2(self, xp, order): a = xp.zeros((2, 0, 3)) b = a.reshape((5, 0, 4), order=order) - # assert b.base is a + if xp is cupy: + assert a.get_array()._pointer == b.get_array()._pointer + else: + assert b.base is a return b @testing.for_orders("CFA") @@ -141,6 +151,7 @@ def test_ndim_limit2(self, dtype, order): class TestRavel: + @testing.for_orders("CFA") # order = 'K' is not supported currently @testing.numpy_cupy_array_equal() @@ -233,6 +244,7 @@ def test_external_ravel(self, xp): ], ) class TestReshapeOrder: + def test_reshape_contiguity(self, order_init, order_reshape, shape_in_out): shape_init, shape_final = shape_in_out @@ -247,4 +259,5 @@ def test_reshape_contiguity(self, order_init, order_reshape, shape_in_out): assert b_cupy.flags.f_contiguous == b_numpy.flags.f_contiguous assert b_cupy.flags.c_contiguous == b_numpy.flags.c_contiguous + # testing.assert_array_equal(b_cupy.strides, b_numpy.strides) testing.assert_array_equal(b_cupy, b_numpy) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_split.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_split.py index 07d5c0546f67..657a316223fc 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_split.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_split.py @@ -1,10 +1,10 @@ import unittest -import dpnp as cupy from dpnp.tests.third_party.cupy import testing class 
TestSplit(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_array_split1(self, xp): a = testing.shaped_arange((3, 11), xp) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_tiling.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_tiling.py index 63d1144e0148..a8a1f06da47f 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_tiling.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_tiling.py @@ -17,6 +17,7 @@ {"repeats": [1, 2, 3], "axis": -2}, ) class TestRepeat(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_array_repeat(self, xp): x = testing.shaped_arange((2, 3, 4), xp) @@ -24,6 +25,7 @@ def test_array_repeat(self, xp): class TestRepeatRepeatsNdarray(unittest.TestCase): + def test_func(self): a = testing.shaped_arange((2, 3, 4), cupy) repeats = cupy.array([2, 3], dtype=cupy.int32) @@ -61,6 +63,7 @@ def test_array_repeat(self, xp): {"repeats": [1, 2, 3, 4], "axis": 0}, ) class TestRepeat1D(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_array_repeat(self, xp): x = testing.shaped_arange((4,), xp) @@ -89,6 +92,7 @@ def test_array_repeat(self, xp): {"repeats": 2, "axis": 3}, ) class TestRepeatFailure(unittest.TestCase): + def test_repeat_failure(self): for xp in (numpy, cupy): x = testing.shaped_arange((2, 3, 4), xp) @@ -105,6 +109,7 @@ def test_repeat_failure(self): {"reps": (2, 3, 4, 5)}, ) class TestTile(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_array_tile(self, xp): x = testing.shaped_arange((2, 3, 4), xp) @@ -116,6 +121,7 @@ def test_array_tile(self, xp): {"reps": (-1, -2)}, ) class TestTileFailure(unittest.TestCase): + def test_tile_failure(self): for xp in (numpy, cupy): x = testing.shaped_arange((2, 3, 4), xp) diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py index af8b2f3978fc..7e7a62dce52a 100644 --- 
a/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py @@ -9,6 +9,7 @@ class TestTranspose(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_moveaxis1(self, xp): a = testing.shaped_arange((2, 3, 4), xp) @@ -52,6 +53,12 @@ def test_moveaxis_invalid1_2(self): with pytest.raises(AxisError): xp.moveaxis(a, [0, 1], [1, 3]) + def test_moveaxis_invalid1_3(self): + for xp in (numpy, cupy): + a = testing.shaped_arange((2, 3, 4), xp) + with pytest.raises(AxisError): + xp.moveaxis(a, 0, 3) + # dim is too small def test_moveaxis_invalid2_1(self): for xp in (numpy, cupy): @@ -158,12 +165,37 @@ def test_external_transpose(self, xp): a = testing.shaped_arange((2, 3, 4), xp) return xp.transpose(a, (-1, 0, 1)) - @testing.numpy_cupy_array_equal() - def test_external_transpose_5d(self, xp): - a = testing.shaped_arange((2, 3, 4, 5, 6), xp) - return xp.transpose(a, (1, 0, 3, 4, 2)) - @testing.numpy_cupy_array_equal() def test_external_transpose_all(self, xp): a = testing.shaped_arange((2, 3, 4), xp) return xp.transpose(a) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +class TestMatrixTranspose: + + @testing.with_requires("numpy>=2.0") + def test_matrix_transpose_raises_error_for_1d(self): + msg = "matrix transpose with ndim < 2 is undefined" + arr = cupy.arange(48) + with pytest.raises(ValueError, match=msg): + arr.mT + + @testing.numpy_cupy_array_equal() + def test_matrix_transpose_equals_transpose_2d(self, xp): + arr = xp.arange(48).reshape((6, 8)) + return arr + + @testing.with_requires("numpy>=2.0") + @pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) + @testing.numpy_cupy_array_equal() + def test_matrix_transpose_equals_swapaxes(self, xp, shape): + vec = xp.arange(shape[-1]) + arr = xp.broadcast_to(vec, shape) + return arr.mT diff --git a/dpnp/tests/third_party/cupy/math_tests/test_arithmetic.py 
b/dpnp/tests/third_party/cupy/math_tests/test_arithmetic.py index 924c72c7ccea..a20b287c18b4 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_arithmetic.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_arithmetic.py @@ -58,6 +58,7 @@ ) ) class TestArithmeticRaisesWithNumpyInput: + def test_raises_with_numpy_input(self): nargs = self.nargs name = self.name @@ -82,6 +83,8 @@ def test_raises_with_numpy_input(self): testing.shaped_arange((2, 3), numpy, dtype=d) for d in all_types ] + # scalar input is not supported + # + [0, 0.0j, 0j, 2, 2.0, 2j, True, False] ), "name": ["conj", "conjugate", "real", "imag"], } @@ -93,6 +96,8 @@ def test_raises_with_numpy_input(self): testing.shaped_arange((2, 3), numpy, dtype=d) for d in all_types ] + # scalar input is not supported + # + [0, 0.0j, 0j, 2, 2.0, 2j, True, False] ), "deg": [True, False], "name": ["angle"], @@ -105,6 +110,8 @@ def test_raises_with_numpy_input(self): numpy.array([-3, -2, -1, 1, 2, 3], dtype=d) for d in negative_types_wo_fp16 ] + # scalar input is not supported + # + [0, 0.0j, 0j, 2, 2.0, 2j, -2, -2.0, -2j, True, False] ), "deg": [True, False], "name": ["angle"], @@ -117,6 +124,8 @@ def test_raises_with_numpy_input(self): testing.shaped_arange((2, 3), numpy, dtype=d) + 1 for d in all_types ] + # scalar input is not supported + # + [2, 2.0, 2j, True] ), "name": ["reciprocal"], } @@ -124,6 +133,7 @@ def test_raises_with_numpy_input(self): ) ) class TestArithmeticUnary: + @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def test_unary(self, xp): arg1 = self.arg1 @@ -170,6 +180,7 @@ def test_unary(self, xp): ) ) class TestComplex: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_real_ndarray_nocomplex(self, xp, dtype): @@ -254,6 +265,7 @@ def test_imag_complex(self, xp, dtype): class ArithmeticBinaryBase: + @testing.numpy_cupy_allclose(rtol=1e-4, type_check=has_support_aspect64()) def check_binary(self, xp): arg1 = self.arg1 @@ -266,18 
+278,6 @@ def check_binary(self, xp): if xp.isscalar(arg1) and xp.isscalar(arg2): pytest.skip("both scalar inputs is not supported") - if self.name == "power" or self.name == "float_power": - # TODO(niboshi): Fix this: power(0, 1j) - # numpy => 1+0j - # cupy => 0j - if dtype2 in complex_types and (np1 == 0).any(): - return xp.array(True) - # TODO: Fix this: power(0j, 0) - # numpy => 1+0j - # cupy => nan+nanj - elif dtype1 in complex_types and (np2 == 0).any(): - return xp.array(True) - if self.name in ("true_divide", "floor_divide", "fmod", "remainder"): if dtype1.kind in "u" and xp.isscalar(arg2) and arg2 < 0: # TODO: Fix this: array(3, dtype=uint) / -2 @@ -290,6 +290,18 @@ def check_binary(self, xp): # cupy => 84.666667 pytest.skip("due to dpctl gh-1711") + if self.name == "power" or self.name == "float_power": + # TODO(niboshi): Fix this: power(0, 1j) + # numpy => 1+0j + # cupy => 0j + if dtype2 in complex_types and (np1 == 0).any(): + return xp.array(True) + # TODO: Fix this: power(0j, 0) + # numpy => 1+0j + # cupy => nan+nanj + elif dtype1 in complex_types and (np2 == 0).any(): + return xp.array(True) + if isinstance(arg1, numpy.ndarray): arg1 = xp.asarray(arg1) if isinstance(arg2, numpy.ndarray): @@ -340,6 +352,17 @@ def check_binary(self, xp): y = y.astype(cupy.float64) elif y.dtype == cupy.complex64: y = y.astype(cupy.complex128) + + # NumPy returns different values (nan/inf) on division by zero + # depending on the architecture. + # As it is not possible for CuPy to replicate this behavior, we ignore + # the difference here. 
+ if self.name in ("floor_divide", "remainder"): + if y.dtype in (float_types + complex_types) and (np2 == 0).any(): + y = xp.asarray(y) + y[y == numpy.inf] = numpy.nan + y[y == -numpy.inf] = numpy.nan + return y @@ -379,6 +402,7 @@ def check_binary(self, xp): ) ) class TestArithmeticBinary(ArithmeticBinaryBase): + def test_binary(self): self.use_dtype = False self.check_binary() @@ -437,23 +461,6 @@ def test_binary(self): "use_dtype": [True, False], } ) - + testing.product( - { - "arg1": [ - testing.shaped_arange((2, 3), numpy, dtype=d) - for d in no_complex_types - ] - + [0, 0.0, 2, 2.0, -2, -2.0, True, False], - "arg2": [ - testing.shaped_reverse_arange((2, 3), numpy, dtype=d) - for d in no_complex_types - ] - + [0, 0.0, 2, 2.0, -2, -2.0, True, False], - "name": ["floor_divide", "fmod", "remainder"], - "dtype": [cupy.default_float_type()], - "use_dtype": [True, False], - } - ) + testing.product( { "arg1": [ @@ -474,12 +481,59 @@ def test_binary(self): ) ) class TestArithmeticBinary2(ArithmeticBinaryBase): + def test_binary(self): self.check_binary() +@testing.with_requires("numpy>=2.0") +class TestArithmeticBinary3(ArithmeticBinaryBase): + + @pytest.mark.parametrize( + "arg1", + [ + testing.shaped_arange((2, 3), numpy, dtype=d) + for d in no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0, True, False], + ) + @pytest.mark.parametrize( + "arg2", + [ + testing.shaped_reverse_arange((2, 3), numpy, dtype=d) + for d in no_complex_types + ] + + [0, 0.0, 2, 2.0, -2, -2.0, True, False], + ) + @pytest.mark.parametrize("name", ["floor_divide", "fmod", "remainder"]) + @pytest.mark.parametrize("dtype", [cupy.default_float_type()]) + @pytest.mark.parametrize("use_dtype", [True, False]) + @testing.numpy_cupy_allclose( + accept_error=OverflowError, type_check=has_support_aspect64() + ) + def test_both_raise(self, arg1, arg2, name, dtype, use_dtype, xp): + if xp.isscalar(arg1) and xp.isscalar(arg2): + pytest.skip("both scalar inputs is not supported") + + func = getattr(xp, 
name) + + if isinstance(arg1, numpy.ndarray): + arg1 = xp.asarray(arg1) + if isinstance(arg2, numpy.ndarray): + arg2 = xp.asarray(arg2) + + dtype_arg = {"dtype": dtype} if use_dtype else {} + with numpy.errstate(divide="ignore"): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + y = func(arg1, arg2, **dtype_arg) + + return y + + @pytest.mark.skip("'casting' keyword is not supported yet") class UfuncTestBase: + @testing.numpy_cupy_allclose(accept_error=TypeError) def check_casting_out(self, in0_type, in1_type, out_type, casting, xp): a = testing.shaped_arange((2, 3), xp, in0_type) @@ -524,6 +578,7 @@ def check_casting_dtype_unsafe_ignore_warnings( class TestUfunc(UfuncTestBase): + @pytest.mark.parametrize( "casting", [ @@ -683,6 +738,7 @@ def test_casting_dtype_unsafe_ignore_warnings( class TestArithmeticModf: + @testing.for_float_dtypes() @testing.numpy_cupy_allclose() def test_modf(self, xp, dtype): @@ -698,6 +754,7 @@ def test_modf(self, xp, dtype): *testing.product({"xp": [numpy, cupy], "shape": [(3, 2), (), (3, 0, 2)]}) ) class TestBoolSubtract: + def test_bool_subtract(self): xp = self.xp shape = self.shape diff --git a/dpnp/tests/third_party/cupy/math_tests/test_explog.py b/dpnp/tests/third_party/cupy/math_tests/test_explog.py index b8da9575b44e..a377b90e3b22 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_explog.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_explog.py @@ -1,5 +1,3 @@ -import warnings - import numpy import pytest @@ -8,6 +6,7 @@ class TestExplog: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def check_unary(self, name, xp, dtype, no_complex=False): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_floating.py b/dpnp/tests/third_party/cupy/math_tests/test_floating.py index 5042953caf52..3bfb4bb00430 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_floating.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_floating.py @@ -9,6 +9,7 @@ 
class TestFloating(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() def test_signbit(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_hyperbolic.py b/dpnp/tests/third_party/cupy/math_tests/test_hyperbolic.py index 34f7cc2250dd..f195b041b5b2 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_hyperbolic.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_hyperbolic.py @@ -5,6 +5,7 @@ class TestHyperbolic(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def check_unary(self, name, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_matmul.py b/dpnp/tests/third_party/cupy/math_tests/test_matmul.py index fe2e833882c7..35852d25dbe7 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_matmul.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_matmul.py @@ -59,6 +59,7 @@ ) ) class TestMatmul(unittest.TestCase): + @testing.for_all_dtypes(name="dtype1") @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_allclose( @@ -95,6 +96,7 @@ def test_cupy_matmul(self, xp, dtype1, dtype2): ) ) class TestMatmulOut(unittest.TestCase): + @testing.for_all_dtypes(name="dtype1") @testing.for_all_dtypes(name="dtype2") @testing.numpy_cupy_allclose( @@ -121,6 +123,7 @@ def test_cupy_matmul_out_cast(self, xp, dtype1, dtype2): class TestMatmulOutOverlap: + @pytest.mark.parametrize( "shape", [ @@ -136,6 +139,7 @@ def test_overlap_both(self, xp, dtype, shape): class TestMatmulStrides: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_relaxed_c_contiguous_input(self, xp, dtype): @@ -164,6 +168,7 @@ def test_relaxed_c_contiguous_input(self, xp, dtype): ) ) class TestMatmulLarge(unittest.TestCase): + # Avoid overflow skip_dtypes = { (numpy.int8, numpy.uint8), @@ -209,7 +214,7 @@ def test_cupy_matmul(self, xp, dtype1, dtype2): @pytest.mark.parametrize( - "shape1, 
shape2", + "shape1,shape2", [ # the first one causes overflow which is undefined behavior # ((256, 256, 3, 2), (256, 256, 2, 4)), @@ -218,6 +223,7 @@ def test_cupy_matmul(self, xp, dtype1, dtype2): ], ) class TestMatmulIntegralLargeBatch: + @testing.for_int_dtypes(name="dtype") @testing.numpy_cupy_array_equal() def test_operator_matmul(self, xp, dtype, shape1, shape2): @@ -235,6 +241,7 @@ def test_cupy_matmul(self, xp, dtype, shape1, shape2): @pytest.mark.skip("overflow is undefined behavior.") class TestMatmulOverflow(unittest.TestCase): + @testing.for_int_dtypes(name="dtype", no_bool=True) @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_overflow(self, xp, dtype): @@ -244,6 +251,125 @@ def test_overflow(self, xp, dtype): return xp.matmul(a, b) +@pytest.mark.skip("set_compute_type() is not supported") +class _TestMatmulComputeTypes(unittest.TestCase): + + def setUp(self): + self.old_compute_type = cupy._core.get_compute_type(self.dtype) + cupy._core.set_compute_type(self.dtype, self.compute_type) + + def tearDown(self): + cupy._core.set_compute_type(self.dtype, self.old_compute_type) + + def make_x1_x2(self, xp, shapes, dtypes): + x1 = testing.shaped_random(shapes[0], xp, dtypes[0]) + x2 = testing.shaped_random(shapes[1], xp, dtypes[1]) + return x1, x2 + + +@testing.parameterize( + *testing.product( + { + "compute_type": [ + # _linalg.COMPUTE_TYPE_DEFAULT, + # _linalg.COMPUTE_TYPE_PEDANTIC, + ], + "shape_pair": [ + ((32, 64), (64, 96)), + ((64, 96), (96, 32)), + ((96, 32), (32, 64)), + ], + } + ) +) +class TestMatmulFp16ComputeTypes(_TestMatmulComputeTypes): + dtype = numpy.float16 + + @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) + def test_operator_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, (self.dtype, self.dtype)) + return operator.matmul(x1, x2) + + @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) + def test_cupy_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, (self.dtype, 
self.dtype)) + return xp.matmul(x1, x2) + + +@testing.parameterize( + *testing.product( + { + "compute_type": [ + # _linalg.COMPUTE_TYPE_DEFAULT, + # _linalg.COMPUTE_TYPE_PEDANTIC, + # _linalg.COMPUTE_TYPE_TF32, + ], + "shape_pair": [ + ((100, 200), (200, 300)), + ((200, 300), (300, 100)), + ((300, 100), (100, 200)), + ], + "dtype_pair": [ + (numpy.float16, numpy.float32), + (numpy.float32, numpy.float32), + (numpy.float16, numpy.complex64), + (numpy.float32, numpy.complex64), + (numpy.complex64, numpy.complex64), + ], + } + ) +) +class TestMatmulFp32ComputeTypes(_TestMatmulComputeTypes): + dtype = numpy.float32 + + @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) + def test_operator_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, self.dtype_pair) + return operator.matmul(x1, x2) + + @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) + def test_cupy_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, self.dtype_pair) + return xp.matmul(x1, x2) + + +@testing.parameterize( + *testing.product( + { + "compute_type": [ + # _linalg.COMPUTE_TYPE_DEFAULT, + # _linalg.COMPUTE_TYPE_PEDANTIC, + ], + "shape_pair": [ + ((100, 200), (200, 300)), + ((200, 300), (300, 100)), + ((300, 100), (100, 200)), + ], + "dtype_pair": [ + (numpy.float32, numpy.float64), + (numpy.float64, numpy.float64), + (numpy.float32, numpy.complex128), + (numpy.float64, numpy.complex128), + (numpy.complex64, numpy.complex128), + (numpy.complex128, numpy.complex128), + ], + } + ) +) +class TestMatmulFp64ComputeTypes(_TestMatmulComputeTypes): + dtype = numpy.float64 + + @testing.numpy_cupy_allclose() + def test_operator_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, self.dtype_pair) + return operator.matmul(x1, x2) + + @testing.numpy_cupy_allclose() + def test_cupy_matmul(self, xp): + x1, x2 = self.make_x1_x2(xp, self.shape_pair, self.dtype_pair) + return xp.matmul(x1, x2) + + @testing.parameterize( *testing.product( { @@ -261,6 +387,7 @@ def 
test_overflow(self, xp, dtype): ) ) class TestMatmulInvalidShape(unittest.TestCase): + def test_invalid_shape(self): for xp in (numpy, cupy): shape1, shape2 = self.shape_pair @@ -303,6 +430,7 @@ def test_invalid_shape(self): ) ) class TestMatmulAxes(unittest.TestCase): + @testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-3) # required for uint8 def test_cupy_matmul_axes(self, xp): x1 = testing.shaped_arange(self.shapes_axes[0][0], xp) @@ -316,6 +444,17 @@ def test_cupy_matmul_axes_out(self, xp): x1 = testing.shaped_arange(self.shapes_axes[0][0], xp) x2 = testing.shaped_arange(self.shapes_axes[0][1], xp) out = xp.zeros(self.shapes_axes[0][2]) - result = xp.matmul(x1, x2, axes=self.shapes_axes[1], out=out) - assert out is result + xp.matmul(x1, x2, axes=self.shapes_axes[1], out=out) return out + + +@pytest.mark.skip("GUFunc is not supported") +class TestMatmulDispatch(unittest.TestCase): + + def test_matmul_dispatch(self): + x1 = testing.shaped_arange((2, 10, 5), cupy) + x2 = testing.shaped_arange((10, 2, 5), cupy) + o_np = numpy.matmul(x1, x2, axes=[(0, 1), (0, 1), (0, 1)]) + assert isinstance(o_np, cupy.ndarray) + o_cp = cupy.matmul(x1, x2, axes=[(0, 1), (0, 1), (0, 1)]) + testing.assert_allclose(o_np, o_cp) diff --git a/dpnp/tests/third_party/cupy/math_tests/test_misc.py b/dpnp/tests/third_party/cupy/math_tests/test_misc.py index 67a66ebf7755..c2d7d5ed0c86 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_misc.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_misc.py @@ -7,6 +7,7 @@ class TestMisc: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def check_unary(self, name, xp, dtype, no_bool=False): @@ -26,16 +27,11 @@ def check_binary(self, name, xp, dtype, no_bool=False): @testing.for_dtypes(["?", "b", "h", "i", "q", "e", "f", "d", "F", "D"]) @testing.numpy_cupy_allclose(atol=1e-5) - # TODO: remove no_comlex=True, once adopted to numpy 2.0 - def check_unary_negative( - self, name, xp, dtype, 
no_bool=False, no_complex=False - ): + def check_unary_negative(self, name, xp, dtype, no_bool=False): if no_bool and numpy.dtype(dtype).char == "?": return numpy.int_(0) a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype) if numpy.dtype(dtype).kind == "c": - if no_complex: - return numpy.int_(0) a += (a * 1j).astype(dtype) return getattr(xp, name)(a) @@ -126,21 +122,12 @@ def test_clip_max_none(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) return a.clip(3, None) + @testing.with_requires("numpy>=2.1") @testing.for_all_dtypes(no_bool=True, no_complex=True) - def test_clip_min_max_none(self, dtype): - for xp in (numpy, cupy): - a = testing.shaped_arange((2, 3, 4), xp, dtype) - # According to Python Array API, clip() should return an array - # with the same elements in `a` if `min` and `max` are `None`. - # Numpy < 2.1 is not compatible with this and raises a ValueError - if ( - xp is numpy - and numpy.lib.NumpyVersion(numpy.__version__) < "2.1.0" - ): - with pytest.raises(ValueError): - a.clip(None, None) - else: - return a.clip(None, None) + @testing.numpy_cupy_array_equal() + def test_clip_min_max_none(self, xp, dtype): + a = testing.shaped_arange((2, 3, 4), xp, dtype) + return a.clip(None, None) @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_array_equal() @@ -209,18 +196,22 @@ def test_fabs(self, xp, dtype): a = xp.array([2, 3, 4], dtype=dtype) return xp.fabs(a) + @testing.with_requires("numpy>=2.0") @testing.for_all_dtypes(no_complex=True) - @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(atol=1e-5) def test_fabs_negative(self, xp, dtype): + if numpy.issubdtype(dtype, numpy.unsignedinteger): + pytest.skip("trying to set negative value to unsigned integer") a = xp.array([-2.0, -4.0, 0.0, 4.0], dtype=dtype) return xp.fabs(a) + @testing.with_requires("numpy>=2.0") def test_sign(self): self.check_unary("sign", no_bool=True) - # TODO: remove no_comlex=True, once adopted to numpy 
2.0 + @testing.with_requires("numpy>=2.0") def test_sign_negative(self): - self.check_unary_negative("sign", no_bool=True, no_complex=True) + self.check_unary_negative("sign", no_bool=True) def test_maximum(self): self.check_binary("maximum") @@ -264,10 +255,9 @@ def test_nan_to_num_inf(self): def test_nan_to_num_nan(self): self.check_unary_nan("nan_to_num") - @pytest.mark.skip(reason="scalar input is not supported") - @testing.numpy_cupy_allclose(atol=1e-5) + @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def test_nan_to_num_scalar_nan(self, xp): - return xp.nan_to_num(xp.nan) + return xp.nan_to_num(xp.array(xp.nan)) def test_nan_to_num_inf_nan(self): self.check_unary_inf_nan("nan_to_num") @@ -292,15 +282,14 @@ def test_nan_to_num_inplace(self, xp): assert x is y return y - @pytest.mark.skip(reason="nan, posinf, neginf as array are not supported") @pytest.mark.parametrize("kwarg", ["nan", "posinf", "neginf"]) def test_nan_to_num_broadcast(self, kwarg): for xp in (numpy, cupy): x = xp.asarray([0, 1, xp.nan, 4], dtype=cupy.default_float_type()) y = xp.zeros((2, 4), dtype=cupy.default_float_type()) - with pytest.raises(TypeError): + with pytest.raises((ValueError, TypeError)): xp.nan_to_num(x, **{kwarg: y}) - with pytest.raises(TypeError): + with pytest.raises((ValueError, TypeError)): xp.nan_to_num(0.0, **{kwarg: y}) @testing.for_all_dtypes(no_bool=True, no_complex=True) @@ -377,6 +366,7 @@ def test_real_if_close_with_float_tol_false(self, xp, dtype): assert x.dtype == out.dtype return out + @pytest.mark.skip("interp() is not supported yet") @testing.for_all_dtypes(name="dtype_x", no_bool=True, no_complex=True) @testing.for_all_dtypes(name="dtype_y", no_bool=True) @testing.numpy_cupy_allclose(atol=1e-5) @@ -387,6 +377,7 @@ def test_interp(self, xp, dtype_y, dtype_x): fy = xp.sin(fx).astype(dtype_y) return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.for_all_dtypes(name="dtype_x", no_bool=True, 
no_complex=True) @testing.for_all_dtypes(name="dtype_y", no_bool=True) @testing.numpy_cupy_allclose(atol=1e-5) @@ -397,6 +388,7 @@ def test_interp_period(self, xp, dtype_y, dtype_x): fy = xp.sin(fx).astype(dtype_y) return xp.interp(x, fx, fy, period=5) + @pytest.mark.skip("interp() is not supported yet") @testing.for_all_dtypes(name="dtype_x", no_bool=True, no_complex=True) @testing.for_all_dtypes(name="dtype_y", no_bool=True) @testing.numpy_cupy_allclose(atol=1e-5) @@ -409,6 +401,7 @@ def test_interp_left_right(self, xp, dtype_y, dtype_x): right = 20 return xp.interp(x, fx, fy, left, right) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_all_dtypes(name="dtype_x", no_bool=True, no_complex=True) @testing.for_dtypes("efdFD", name="dtype_y") @@ -421,6 +414,7 @@ def test_interp_nan_fy(self, xp, dtype_y, dtype_x): fy[0] = fy[2] = fy[-1] = numpy.nan return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_float_dtypes(name="dtype_x") @testing.for_dtypes("efdFD", name="dtype_y") @@ -433,6 +427,7 @@ def test_interp_nan_fx(self, xp, dtype_y, dtype_x): fx[-1] = numpy.nan # x and fx must remain sorted (NaNs are the last) return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_float_dtypes(name="dtype_x") @testing.for_dtypes("efdFD", name="dtype_y") @@ -445,6 +440,7 @@ def test_interp_nan_x(self, xp, dtype_y, dtype_x): x[-1] = numpy.nan # x and fx must remain sorted (NaNs are the last) return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_all_dtypes(name="dtype_x", no_bool=True, no_complex=True) @testing.for_dtypes("efdFD", name="dtype_y") @@ -457,6 +453,7 @@ def test_interp_inf_fy(self, xp, dtype_y, dtype_x): fy[0] = fy[2] = fy[-1] = numpy.inf return xp.interp(x, fx, fy) + 
@pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_float_dtypes(name="dtype_x") @testing.for_dtypes("efdFD", name="dtype_y") @@ -469,6 +466,7 @@ def test_interp_inf_fx(self, xp, dtype_y, dtype_x): fx[-1] = numpy.inf # x and fx must remain sorted return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_float_dtypes(name="dtype_x") @testing.for_dtypes("efdFD", name="dtype_y") @@ -481,6 +479,7 @@ def test_interp_inf_x(self, xp, dtype_y, dtype_x): x[-1] = numpy.inf # x and fx must remain sorted return xp.interp(x, fx, fy) + @pytest.mark.skip("interp() is not supported yet") @testing.for_all_dtypes(name="dtype_x", no_bool=True, no_complex=True) @testing.for_all_dtypes(name="dtype_y", no_bool=True) @testing.numpy_cupy_allclose(atol=1e-5) @@ -493,6 +492,7 @@ def test_interp_size1(self, xp, dtype_y, dtype_x): right = 20 return xp.interp(x, fx, fy, left, right) + @pytest.mark.skip("interp() is not supported yet") @testing.with_requires("numpy>=1.17.0") @testing.for_float_dtypes(name="dtype_x") @testing.for_dtypes("efdFD", name="dtype_y") @@ -532,6 +532,7 @@ def test_heaviside_nan_inf(self, xp, dtype_1, dtype_2): ) @pytest.mark.skip("convolve() is not implemented yet") class TestConvolveShapeCombination: + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-3) def test_convolve(self, xp, dtype): @@ -543,6 +544,7 @@ def test_convolve(self, xp, dtype): @pytest.mark.skip("convolve() is not implemented yet") @pytest.mark.parametrize("mode", ["valid", "same", "full"]) class TestConvolve: + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_convolve_non_contiguous(self, xp, dtype, mode): @@ -568,6 +570,7 @@ def test_convolve_diff_types(self, xp, dtype1, dtype2, mode): @pytest.mark.skip("convolve() is not implemented yet") @testing.parameterize(*testing.product({"mode": ["valid", "same", 
"full"]})) class TestConvolveInvalid: + @testing.for_all_dtypes() def test_convolve_empty(self, dtype): for xp in (numpy, cupy): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_rational.py b/dpnp/tests/third_party/cupy/math_tests/test_rational.py index 218bd7918050..36c11cca1dde 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_rational.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_rational.py @@ -7,6 +7,7 @@ class TestRational(unittest.TestCase): + @testing.for_dtypes(["?", "e", "f", "d", "F", "D"]) def test_gcd_dtype_check(self, dtype): a = cupy.random.randint(-10, 10, size=(10, 10)).astype(dtype) diff --git a/dpnp/tests/third_party/cupy/math_tests/test_rounding.py b/dpnp/tests/third_party/cupy/math_tests/test_rounding.py index 54a379490865..a2ad717f2500 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_rounding.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_rounding.py @@ -9,6 +9,7 @@ class TestRounding(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(type_check=False, atol=1e-5) def check_unary(self, name, xp, dtype): @@ -25,8 +26,6 @@ def check_unary_complex(self, name, xp, dtype): def check_unary_complex_unsupported(self, name, dtype): for xp in (numpy, cupy): a = testing.shaped_arange((2, 3), xp, dtype) - # NumPy returns TypeError while DPNP returns ValueError - # for these functions: "ceil", "floor", "trunc" with pytest.raises((TypeError, ValueError)): getattr(xp, name)(a) @@ -52,18 +51,22 @@ def test_rint_negative(self): self.check_unary_negative("rint") self.check_unary_negative_complex("rint") + @testing.with_requires("numpy>=2.1") def test_floor(self): self.check_unary("floor") self.check_unary_complex_unsupported("floor") + @testing.with_requires("numpy>=2.1") def test_ceil(self): self.check_unary("ceil") self.check_unary_complex_unsupported("ceil") + @testing.with_requires("numpy>=2.1") def test_trunc(self): self.check_unary("trunc") 
self.check_unary_complex_unsupported("trunc") + @testing.with_requires("numpy>=2.1") def test_fix(self): self.check_unary("fix") self.check_unary_complex_unsupported("fix") @@ -74,7 +77,7 @@ def test_around(self): def test_round(self): self.check_unary("round") - self.check_unary_complex("around") + self.check_unary_complex("round") @testing.parameterize( @@ -85,6 +88,7 @@ def test_round(self): ) ) class TestRound(unittest.TestCase): + shape = (20,) @testing.for_all_dtypes() @@ -101,10 +105,11 @@ def test_round(self, xp, dtype): a = testing.shaped_random(self.shape, xp, scale=100, dtype=dtype) return xp.around(a, self.decimals) - @testing.numpy_cupy_allclose(atol=1e-5) + @testing.numpy_cupy_allclose() def test_round_out(self, xp): - dtype = "d" if has_support_aspect64() else "f" - a = testing.shaped_random(self.shape, xp, scale=100, dtype=dtype) + a = testing.shaped_random( + self.shape, xp, scale=100, dtype=cupy.default_float_type() + ) out = xp.empty_like(a) xp.around(a, self.decimals, out) return out @@ -121,6 +126,7 @@ def test_round_out(self, xp): not has_support_aspect64(), reason="overflow encountered for float32 dtype" ) class TestRoundExtreme(unittest.TestCase): + shape = (20,) dtype_ = ( @@ -160,10 +166,11 @@ def test_round_small(self, xp, dtype): ) ) class TestRoundBorder(unittest.TestCase): - @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) + + @pytest.mark.skip("scalar input is not supported") + @testing.numpy_cupy_allclose(atol=1e-5) def test_around_positive1(self, xp): a, decimals = self.value - a = xp.asarray(a) return xp.around(a, decimals) @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) @@ -172,10 +179,10 @@ def test_around_positive2(self, xp): a = xp.asarray(a) return xp.around(a, decimals) - @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) + @pytest.mark.skip("scalar input is not supported") + @testing.numpy_cupy_allclose(atol=1e-5) def test_around_negative1(self, 
xp): a, decimals = self.value - a = xp.asarray(a) return xp.around(-a, decimals) @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) diff --git a/dpnp/tests/third_party/cupy/math_tests/test_special.py b/dpnp/tests/third_party/cupy/math_tests/test_special.py index 8c6753e12c35..49b9713fd480 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_special.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_special.py @@ -7,6 +7,7 @@ class TestSpecial(unittest.TestCase): + @testing.for_dtypes(["e", "f", "d"]) @testing.numpy_cupy_allclose(rtol=1e-3) def test_i0(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py b/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py index cc482cabea95..cc58a8cc32a3 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py @@ -1,5 +1,4 @@ import math -import warnings import numpy import pytest @@ -14,6 +13,7 @@ class TestSumprod: + @pytest.fixture(autouse=True) def tearDown(self): # Free huge memory for slow test @@ -204,13 +204,241 @@ def test_prod_dtype(self, xp, src_dtype, dst_dtype): a = testing.shaped_arange((2, 3), xp, src_dtype) return a.prod(dtype=dst_dtype) - @pytest.mark.skip("product() is deprecated") - @testing.numpy_cupy_allclose() - def test_product_alias(self, xp): - a = testing.shaped_arange((2, 3), xp, xp.float32) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - return xp.product(a) + +# This class compares CUB results against NumPy's +@testing.parameterize( + *testing.product( + { + "shape": [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], + "order": ("C", "F"), + "backend": ("device", "block"), + } + ) +) +@pytest.mark.skip("_cub_reduction is not supported") +class TestCubReduction: + + @pytest.fixture(autouse=True) + def setUp(self): + old_routine_accelerators = _acc.get_routine_accelerators() + old_reduction_accelerators = 
_acc.get_reduction_accelerators() + if self.backend == "device": + _acc.set_routine_accelerators(["cub"]) + _acc.set_reduction_accelerators([]) + elif self.backend == "block": + _acc.set_routine_accelerators([]) + _acc.set_reduction_accelerators(["cub"]) + yield + _acc.set_routine_accelerators(old_routine_accelerators) + _acc.set_reduction_accelerators(old_reduction_accelerators) + + @testing.for_contiguous_axes() + # sum supports less dtypes; don't test float16 as it's not as accurate? + @testing.for_dtypes("qQfdFD") + @testing.numpy_cupy_allclose(rtol=1e-5) + def test_cub_sum(self, xp, dtype, axis): + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + + if xp is numpy: + return a.sum(axis=axis) + + # xp is cupy, first ensure we really use CUB + ret = cupy.empty(()) # Cython checks return type, need to fool it + if self.backend == "device": + func_name = "cupy._core._routines_math.cub." + if len(axis) == len(self.shape): + func_name += "device_reduce" + else: + func_name += "device_segmented_reduce" + with testing.AssertFunctionIsCalled(func_name, return_value=ret): + a.sum(axis=axis) + elif self.backend == "block": + # this is the only function we can mock; the rest is cdef'd + func_name = "cupy._core._cub_reduction." + func_name += "_SimpleCubReductionKernel_get_cached_function" + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + if len(axis) == len(self.shape): + times_called = 2 # two passes + else: + times_called = 1 # one pass + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=times_called + ): + a.sum(axis=axis) + # ...then perform the actual computation + return a.sum(axis=axis) + + # sum supports less dtypes; don't test float16 as it's not as accurate? 
+ @testing.for_dtypes("qQfdFD") + @testing.numpy_cupy_allclose(rtol=1e-5, contiguous_check=False) + def test_cub_sum_empty_axis(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + return a.sum(axis=()) + + @testing.for_contiguous_axes() + # prod supports less dtypes; don't test float16 as it's not as accurate? + @testing.for_dtypes("qQfdFD") + @testing.numpy_cupy_allclose(rtol=1e-5) + def test_cub_prod(self, xp, dtype, axis): + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + + if xp is numpy: + return a.prod(axis=axis) + + # xp is cupy, first ensure we really use CUB + ret = cupy.empty(()) # Cython checks return type, need to fool it + if self.backend == "device": + func_name = "cupy._core._routines_math.cub." + if len(axis) == len(self.shape): + func_name += "device_reduce" + else: + func_name += "device_segmented_reduce" + with testing.AssertFunctionIsCalled(func_name, return_value=ret): + a.prod(axis=axis) + elif self.backend == "block": + # this is the only function we can mock; the rest is cdef'd + func_name = "cupy._core._cub_reduction." + func_name += "_SimpleCubReductionKernel_get_cached_function" + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + if len(axis) == len(self.shape): + times_called = 2 # two passes + else: + times_called = 1 # one pass + with testing.AssertFunctionIsCalled( + func_name, wraps=func, times_called=times_called + ): + a.prod(axis=axis) + # ...then perform the actual computation + return a.prod(axis=axis) + + # TODO(leofang): test axis after support is added + # don't test float16 as it's not as accurate? 
+ @testing.for_dtypes("bhilBHILfdF") + @testing.numpy_cupy_allclose(rtol=1e-4) + def test_cub_cumsum(self, xp, dtype): + if self.backend == "block": + pytest.skip("does not support") + + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + + if xp is numpy: + return a.cumsum() + + # xp is cupy, first ensure we really use CUB + ret = cupy.empty(()) # Cython checks return type, need to fool it + func = "cupy._core._routines_math.cub.device_scan" + with testing.AssertFunctionIsCalled(func, return_value=ret): + a.cumsum() + # ...then perform the actual computation + return a.cumsum() + + # TODO(leofang): test axis after support is added + # don't test float16 as it's not as accurate? + @testing.for_dtypes("bhilBHILfdF") + @testing.numpy_cupy_allclose(rtol=1e-4) + def test_cub_cumprod(self, xp, dtype): + if self.backend == "block": + pytest.skip("does not support") + + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + + if xp is numpy: + result = a.cumprod() + return self._mitigate_cumprod(xp, dtype, result) + + # xp is cupy, first ensure we really use CUB + ret = cupy.empty(()) # Cython checks return type, need to fool it + func = "cupy._core._routines_math.cub.device_scan" + with testing.AssertFunctionIsCalled(func, return_value=ret): + a.cumprod() + # ...then perform the actual computation + result = a.cumprod() + return self._mitigate_cumprod(xp, dtype, result) + + def _mitigate_cumprod(self, xp, dtype, result): + # for testing cumprod against complex arrays, the gotcha is CuPy may + # produce only Inf at the position where NumPy starts to give NaN. So, + # an error would be raised during assert_allclose where the positions + # of NaNs are examined. 
Since this is both algorithm and architecture + # dependent, we have no control over this behavior and can only + # circumvent the issue by manually converting Inf to NaN + if dtype in (numpy.complex64, numpy.complex128): + pos = xp.where(xp.isinf(result)) + result[pos] = xp.nan + 1j * xp.nan + return result + + +# This class compares cuTENSOR results against NumPy's +@testing.parameterize( + *testing.product( + { + "shape": [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)], + "order": ("C", "F"), + } + ) +) +@pytest.mark.skip("cutensor is not supported") +class TestCuTensorReduction: + + @pytest.fixture(autouse=True) + def setUp(self): + old_accelerators = cupy._core.get_routine_accelerators() + cupy._core.set_routine_accelerators(["cutensor"]) + yield + cupy._core.set_routine_accelerators(old_accelerators) + + @testing.for_contiguous_axes() + # sum supports less dtypes; don't test float16 as it's not as accurate? + @testing.for_dtypes("qQfdFD") + @testing.numpy_cupy_allclose(rtol=1e-5, contiguous_check=False) + def test_cutensor_sum(self, xp, dtype, axis): + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + + if xp is numpy: + return a.sum(axis=axis) + + # xp is cupy, first ensure we really use cuTENSOR + ret = cupy.empty(()) # Cython checks return type, need to fool it + func = "cupyx.cutensor._try_reduction_routine" + with testing.AssertFunctionIsCalled(func, return_value=ret): + a.sum(axis=axis) + # ...then perform the actual computation + return a.sum(axis=axis) + + # sum supports less dtypes; don't test float16 as it's not as accurate? 
+ @testing.for_dtypes("qQfdFD") + @testing.numpy_cupy_allclose(rtol=1e-5, contiguous_check=False) + def test_cutensor_sum_empty_axis(self, xp, dtype): + a = testing.shaped_random(self.shape, xp, dtype) + if self.order in ("c", "C"): + a = xp.ascontiguousarray(a) + elif self.order in ("f", "F"): + a = xp.asfortranarray(a) + return a.sum(axis=()) @testing.parameterize( @@ -225,6 +453,7 @@ def test_product_alias(self, xp): ) ) class TestNansumNanprodLong: + def _do_transposed_axis_test(self): return not self.transpose_axes and self.axis != 1 @@ -257,7 +486,6 @@ def _test(self, xp, dtype): @testing.for_all_dtypes(no_bool=True, no_float16=True) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_nansum_all(self, xp, dtype): - dtype = xp.float32 if ( not self._numpy_nanprod_implemented() or not self._do_transposed_axis_test() @@ -284,6 +512,7 @@ def test_nansum_axis_transposed(self, xp, dtype): ) ) class TestNansumNanprodExtra: + def test_nansum_axis_float16(self): # Note that the above test example overflows in float16. We use a # smaller array instead, just return if array is too large. 
@@ -360,6 +589,7 @@ def test_nansum_axis_huge_halfnan(self, xp): @testing.parameterize(*testing.product({"axis": axes})) class TestCumsum: + def _cumsum(self, xp, a, *args, **kwargs): b = a.copy() res = xp.cumsum(a, *args, **kwargs) @@ -475,6 +705,7 @@ def test_cumsum_numpy_array(self, dtype): class TestCumprod: + def _cumprod(self, xp, a, *args, **kwargs): b = a.copy() res = xp.cumprod(a, *args, **kwargs) @@ -569,14 +800,6 @@ def test_cumprod_numpy_array(self, dtype): with pytest.raises(TypeError): return cupy.cumprod(a_numpy) - @pytest.mark.skip("cumproduct() is deprecated") - @testing.numpy_cupy_allclose() - def test_cumproduct_alias(self, xp): - a = testing.shaped_arange((2, 3), xp, xp.float32) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - return xp.cumproduct(a) - @pytest.mark.usefixtures("suppress_invalid_numpy_warnings") @testing.parameterize( @@ -589,6 +812,7 @@ def test_cumproduct_alias(self, xp): ) ) class TestNanCumSumProd: + zero_density = 0.25 def _make_array(self, dtype): @@ -636,6 +860,7 @@ def test_nancumsumprod_out(self, xp, dtype): class TestDiff: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_diff_1dim(self, xp, dtype): @@ -735,6 +960,7 @@ def test_diff_invalid_axis(self): ) ) class TestGradient: + def _gradient(self, xp, dtype, shape, spacing, axis, edge_order): if ( not has_support_aspect64() @@ -799,6 +1025,7 @@ def test_gradient_float16(self, xp): class TestGradientErrors: + def test_gradient_invalid_spacings1(self): # more spacings than axes spacing = (1.0, 2.0, 3.0) @@ -861,6 +1088,7 @@ def test_gradient_bool_input(self): class TestEdiff1d: + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_allclose() def test_ediff1d_1dim(self, xp, dtype): @@ -930,60 +1158,56 @@ def test_ediff1d_ed2(self, xp, dtype): ) -class TestTrapz: - def get_func(self, xp): - if xp is numpy and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0": - # `trapz` is deprecated in NumPy 2.0 - return 
xp.trapz - return xp.trapezoid +@testing.with_requires("numpy>=2.0") +class TestTrapezoid: @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_1dim(self, xp, dtype): a = testing.shaped_arange((5,), xp, dtype) - return self.get_func(xp)(a) + return xp.trapezoid(a) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_1dim_with_x(self, xp, dtype): a = testing.shaped_arange((5,), xp, dtype) x = testing.shaped_arange((5,), xp, dtype) - return self.get_func(xp)(a, x=x) + return xp.trapezoid(a, x=x) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_1dim_with_dx(self, xp, dtype): a = testing.shaped_arange((5,), xp, dtype) - return self.get_func(xp)(a, dx=0.1) + return xp.trapezoid(a, dx=0.1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_2dim_without_axis(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) - return self.get_func(xp)(a) + return xp.trapezoid(a) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_2dim_with_axis(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) - return self.get_func(xp)(a, axis=-2) + return xp.trapezoid(a, axis=-2) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_2dim_with_x_and_axis(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, 
dtype) x = testing.shaped_arange((5,), xp, dtype) - return self.get_func(xp)(a, x=x, axis=1) + return xp.trapezoid(a, x=x, axis=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_2dim_with_dx_and_axis(self, xp, dtype): a = testing.shaped_arange((4, 5), xp, dtype) - return self.get_func(xp)(a, dx=0.1, axis=1) + return xp.trapezoid(a, dx=0.1, axis=1) @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-1, "default": 1e-7}) def test_trapz_1dim_with_x_and_dx(self, xp, dtype): a = testing.shaped_arange((5,), xp, dtype) x = testing.shaped_arange((5,), xp, dtype) - return self.get_func(xp)(a, x=x, dx=0.1) + return xp.trapezoid(a, x=x, dx=0.1) diff --git a/dpnp/tests/third_party/cupy/math_tests/test_trigonometric.py b/dpnp/tests/third_party/cupy/math_tests/test_trigonometric.py index 9161b0898603..e0d4f484082d 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_trigonometric.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_trigonometric.py @@ -5,6 +5,7 @@ class TestTrigonometric(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(atol=1e-5, type_check=has_support_aspect64()) def check_unary(self, name, xp, dtype): @@ -57,6 +58,7 @@ def test_rad2deg(self): @testing.with_requires("numpy>=1.21.0") class TestUnwrap(unittest.TestCase): + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_unwrap_1dim(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/math_tests/test_window.py b/dpnp/tests/third_party/cupy/math_tests/test_window.py new file mode 100644 index 000000000000..1d7e3642cf93 --- /dev/null +++ b/dpnp/tests/third_party/cupy/math_tests/test_window.py @@ -0,0 +1,46 @@ +import unittest + +import pytest + +from 
dpnp.tests.third_party.cupy import testing + +pytest.skip("window functions are not supported yet", allow_module_level=True) + + +@testing.parameterize( + *testing.product( + { + "m": [0, 1, -1, 1024], + "name": ["bartlett", "blackman", "hamming", "hanning"], + } + ) +) +class TestWindow(unittest.TestCase): + + @testing.numpy_cupy_allclose(atol=1e-5) + def test_window(self, xp): + return getattr(xp, self.name)(self.m) + + +@testing.parameterize( + *testing.product( + { + "m": [10, 30, 1024], + "beta": [-3.4, 0, 5, 6, 8.6], + "name": ["kaiser"], + } + ) +) +class TestKaiser(unittest.TestCase): + + @testing.numpy_cupy_allclose(atol=1e-5) + def test_kaiser_parametric(self, xp): + return getattr(xp, self.name)(self.m, self.beta) + + +@testing.parameterize(*testing.product({"m": [-1, 0, 1]})) +class TestKaiserBoundary(unittest.TestCase): + + @testing.numpy_cupy_allclose(atol=1e-5) + def test_kaiser(self, xp): + return xp.kaiser(self.m, 1.5) diff --git a/dpnp/tests/third_party/cupy/misc_tests/test_byte_bounds.py b/dpnp/tests/third_party/cupy/misc_tests/test_byte_bounds.py index c341d30b36db..a0cfb53f93b6 100644 --- a/dpnp/tests/third_party/cupy/misc_tests/test_byte_bounds.py +++ b/dpnp/tests/third_party/cupy/misc_tests/test_byte_bounds.py @@ -1,75 +1,81 @@ import dpnp as cupy - - -def test_1d_contiguous(): - a = cupy.zeros(12, dtype=cupy.int64) - itemsize = a.itemsize - a_low = a.get_array()._pointer - a_high = a.get_array()._pointer + 12 * itemsize - assert cupy.byte_bounds(a) == (a_low, a_high) - - -def test_2d_contiguous(): - a = cupy.zeros((4, 7), dtype=cupy.int64) - itemsize = a.itemsize - a_low = a.get_array()._pointer - a_high = a.get_array()._pointer + 4 * 7 * itemsize - assert cupy.byte_bounds(a) == (a_low, a_high) - - -def test_1d_noncontiguous_pos_stride(): - a = cupy.zeros(12, dtype=cupy.int64) - itemsize = a.itemsize - b = a[::2] - b_low = b.get_array()._pointer - b_high = b.get_array()._pointer + 11 * itemsize # a[10] - assert cupy.byte_bounds(b) == (b_low, 
b_high) - - -def test_2d_noncontiguous_pos_stride(): - a = cupy.zeros((4, 7), dtype=cupy.int64) - b = a[::2, ::2] - itemsize = b.itemsize - b_low = a.get_array()._pointer - b_high = b.get_array()._pointer + 3 * 7 * itemsize # a[2][6] - assert cupy.byte_bounds(b) == (b_low, b_high) - - -def test_1d_contiguous_neg_stride(): - a = cupy.zeros(12, dtype=cupy.int64) - b = a[::-1] - itemsize = b.itemsize - b_low = b.get_array()._pointer - 11 * itemsize - b_high = b.get_array()._pointer + 1 * itemsize - assert cupy.byte_bounds(b) == (b_low, b_high) - - -def test_2d_noncontiguous_neg_stride(): - a = cupy.zeros((4, 7), dtype=cupy.int64) - b = a[::-2, ::-2] # strides = (-56, -8), shape = (2, 4) - itemsize = b.itemsize - b_low = ( - b.get_array()._pointer - - 2 * 7 * itemsize * (2 - 1) - - 2 * itemsize * (4 - 1) - ) - b_high = b.get_array()._pointer + 1 * itemsize - assert cupy.byte_bounds(b) == (b_low, b_high) - - -def test_2d_noncontiguous_posneg_stride_1(): - a = cupy.zeros((4, 7), dtype=cupy.int64) - b = a[::1, ::-1] # strides = (28, -4), shape=(4, 7) - itemsize = b.itemsize - b_low = b.get_array()._pointer - itemsize * (7 - 1) - b_high = b.get_array()._pointer + 1 * itemsize + 7 * itemsize * (4 - 1) - assert cupy.byte_bounds(b) == (b_low, b_high) - - -def test_2d_noncontiguous_posneg_stride_2(): - a = cupy.zeros((4, 7), dtype=cupy.int64) - b = a[::2, ::-2] # strides = (56, -8), shape=(2, 4) - itemsize = b.itemsize - b_low = b.get_array()._pointer - 2 * itemsize * (4 - 1) - b_high = b.get_array()._pointer + 1 * itemsize + 2 * 7 * itemsize * (2 - 1) - assert cupy.byte_bounds(b) == (b_low, b_high) +from dpnp.tests.third_party.cupy import testing + + +class TestByteBounds: + + @testing.for_all_dtypes() + def test_1d_contiguous(self, dtype): + a = cupy.zeros(12, dtype=dtype) + itemsize = a.itemsize + a_low = a.get_array()._pointer + a_high = a.get_array()._pointer + 12 * itemsize + assert cupy.byte_bounds(a) == (a_low, a_high) + + @testing.for_all_dtypes() + def 
test_2d_contiguous(self, dtype): + a = cupy.zeros((4, 7), dtype=dtype) + itemsize = a.itemsize + a_low = a.get_array()._pointer + a_high = a.get_array()._pointer + 4 * 7 * itemsize + assert cupy.byte_bounds(a) == (a_low, a_high) + + @testing.for_all_dtypes() + def test_1d_noncontiguous_pos_stride(self, dtype): + a = cupy.zeros(12, dtype=dtype) + itemsize = a.itemsize + b = a[::2] + b_low = b.get_array()._pointer + b_high = b.get_array()._pointer + 11 * itemsize # a[10] + assert cupy.byte_bounds(b) == (b_low, b_high) + + @testing.for_all_dtypes() + def test_2d_noncontiguous_pos_stride(self, dtype): + a = cupy.zeros((4, 7), dtype=dtype) + b = a[::2, ::2] + itemsize = b.itemsize + b_low = a.get_array()._pointer + b_high = b.get_array()._pointer + 3 * 7 * itemsize # a[2][6] + assert cupy.byte_bounds(b) == (b_low, b_high) + + @testing.for_all_dtypes() + def test_1d_contiguous_neg_stride(self, dtype): + a = cupy.zeros(12, dtype=dtype) + b = a[::-1] + itemsize = b.itemsize + b_low = b.get_array()._pointer - 11 * itemsize + b_high = b.get_array()._pointer + 1 * itemsize + assert cupy.byte_bounds(b) == (b_low, b_high) + + @testing.for_all_dtypes() + def test_2d_noncontiguous_neg_stride(self, dtype): + a = cupy.zeros((4, 7), dtype=dtype) + b = a[::-2, ::-2] # strides = (-56, -8), shape = (2, 4) + itemsize = b.itemsize + b_low = ( + b.get_array()._pointer + - 2 * 7 * itemsize * (2 - 1) + - 2 * itemsize * (4 - 1) + ) + b_high = b.get_array()._pointer + 1 * itemsize + assert cupy.byte_bounds(b) == (b_low, b_high) + + @testing.for_all_dtypes() + def test_2d_noncontiguous_posneg_stride_1(self, dtype): + a = cupy.zeros((4, 7), dtype=dtype) + b = a[::1, ::-1] # strides = (28, -4), shape=(4, 7) + itemsize = b.itemsize + b_low = b.get_array()._pointer - itemsize * (7 - 1) + b_high = b.get_array()._pointer + 1 * itemsize + 7 * itemsize * (4 - 1) + assert cupy.byte_bounds(b) == (b_low, b_high) + + @testing.for_all_dtypes() + def test_2d_noncontiguous_posneg_stride_2(self, dtype): + a = 
cupy.zeros((4, 7), dtype=dtype) + b = a[::2, ::-2] # strides = (56, -8), shape=(2, 4) + itemsize = b.itemsize + b_low = b.get_array()._pointer - 2 * itemsize * (4 - 1) + b_high = ( + b.get_array()._pointer + 1 * itemsize + 2 * 7 * itemsize * (2 - 1) + ) + assert cupy.byte_bounds(b) == (b_low, b_high) diff --git a/dpnp/tests/third_party/cupy/misc_tests/test_memory_ranges.py b/dpnp/tests/third_party/cupy/misc_tests/test_memory_ranges.py new file mode 100644 index 000000000000..2cd35ccc7222 --- /dev/null +++ b/dpnp/tests/third_party/cupy/misc_tests/test_memory_ranges.py @@ -0,0 +1,161 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +pytest.skip( + "may_share_memory() and shares_memory() are not supported yet", + allow_module_level=True, +) + + +class TestMayShareMemory(unittest.TestCase): + + @testing.numpy_cupy_equal() + def test_different_arrays(self, xp): + a = xp.array([1, 2, 3]) + b = xp.array([1, 2, 3]) + assert xp.may_share_memory(a, b) is False + + @testing.numpy_cupy_equal() + def test_same_array(self, xp): + a = xp.array([1, 2, 3]) + assert xp.may_share_memory(a, a) is True + + @testing.numpy_cupy_equal() + def test_zero_size(self, xp): + a = xp.array([]) + assert xp.may_share_memory(a, a) is False + + @testing.numpy_cupy_equal() + def test_shares_memory(self, xp): + x = xp.arange(12) + a = x[0:7] + b = x[6:12] + assert xp.may_share_memory(a, b) is True + + @testing.numpy_cupy_equal() + def test_cover(self, xp): + x = xp.arange(12) + a = x[1:10] + b = x[4:6] + assert xp.may_share_memory(a, b) is True + + @testing.numpy_cupy_equal() + def test_away(self, xp): + x = xp.arange(12) + a = x[1:6] + b = x[8:11] + assert xp.may_share_memory(a, b) is False + + @testing.numpy_cupy_equal() + def test_touch_edge_true(self, xp): + x = xp.arange(12) + a = x[1:10] + b = x[7:10] + assert xp.may_share_memory(a, b) is True + + def test_negative_strides(self): + for xp in (numpy, cupy): + a = 
xp.zeros((3, 3)) + assert xp.may_share_memory(a[:2, 1::-1], a[1:, 1:]) is True + + @testing.numpy_cupy_equal() + def test_touch_edge_false(self, xp): + x = xp.arange(12) + a = x[1:7] + b = x[7:10] + assert xp.may_share_memory(a, b) is False + + def _get_slices(self, size): + slices = [] + for start in range(0, size + 1): + for end in range(start, size + 1): + for step in range(-2, 2): + if step != 0: + slices.append(slice(start, end, step)) + return slices + + def test_combination(self): + size = 4 + slices = self._get_slices(size) + memory_np = numpy.empty(size * size) + memory_cp = cupy.empty(size * size) + + arrays = [] + + array_1d_np = memory_np[5 : 5 + size] + array_1d_cp = memory_cp[5 : 5 + size] + for s in slices: + arrays.append((array_1d_np[s], array_1d_cp[s], s)) + + array_2d_np = memory_np.reshape(size, size) + array_2d_cp = memory_cp.reshape(size, size) + for s1 in slices: + for s2 in slices: + arrays.append( + (array_2d_np[s1, s2], array_2d_cp[s1, s2], (s1, s2)) + ) + + for array1_np, array1_cp, sl1 in arrays: + for array2_np, array2_cp, sl2 in arrays: + ret_np = numpy.may_share_memory(array1_np, array2_np) + ret_cp = cupy.may_share_memory(array1_cp, array2_cp) + assert ret_np == ret_cp, "Failed in case of {} and {}".format( + sl1, sl2 + ) + + +class TestSharesMemory(unittest.TestCase): + + def test_different_arrays(self): + for xp in (numpy, cupy): + a = xp.array([1, 2, 3]) + b = xp.array([1, 2, 3]) + assert xp.shares_memory(a, b) is False + + def test_same_array(self): + for xp in (numpy, cupy): + a = xp.array([1, 2, 3]) + assert xp.shares_memory(a, a) is True + + def test_zero_size_array(self): + for xp in (numpy, cupy): + a = xp.array([]) + assert xp.shares_memory(a, a) is False + + def test_contiguous_arrays(self): + for xp in (numpy, cupy): + x = xp.arange(12) + # shares memory + assert xp.shares_memory(x[0:7], x[6:12]) is True + # covers + assert xp.shares_memory(x[1:10], x[4:6]) is True + assert xp.shares_memory(x[4:6], x[1:10]) is True + # 
detached + assert xp.shares_memory(x[1:6], x[8:11]) is False + # touch + assert xp.shares_memory(x[1:10], x[7:10]) is True + assert xp.shares_memory(x[1:7], x[7:10]) is False + + def test_non_contiguous_case(self): + for xp in (numpy, cupy): + x = xp.arange(100) + assert xp.shares_memory(x, x[1::4]) is True + assert xp.shares_memory(x[0::2], x[1::4]) is False + assert xp.shares_memory(x[0::9], x[1::11]) is True + + def test_multi_dimension_case(self): + for xp in (numpy, cupy): + x = xp.arange(100).reshape(10, 10) + assert xp.shares_memory(x[0::2], x[1::3]) is True + assert xp.shares_memory(x[0::2], x[1::4]) is False + assert xp.shares_memory(x[0::2], x[::, 1::2]) is True + + def test_complex_type_case(self): + for xp in (numpy, cupy): + x = testing.shaped_random((2, 3, 4), xp, numpy.complex128) + assert xp.shares_memory(x, x.imag) is True diff --git a/dpnp/tests/third_party/cupy/misc_tests/test_who.py b/dpnp/tests/third_party/cupy/misc_tests/test_who.py new file mode 100644 index 000000000000..7c8ca2f2bada --- /dev/null +++ b/dpnp/tests/third_party/cupy/misc_tests/test_who.py @@ -0,0 +1,69 @@ +import pytest + +import dpnp as cupy + +pytest.skip("who() is not supported yet", allow_module_level=True) + + +class TestWho: + def test_who_empty(self, capsys): + cupy.who() + out, err = capsys.readouterr() + lines = out.split("\n") + assert len(lines) == 3 + assert lines[1] == "Upper bound on total bytes = 0" + + def test_who_local_var(self, capsys): + # Variables declared inside an object function are not visible + # this is true also for numpy + x = cupy.ones(10) + cupy.who() + out, err = capsys.readouterr() + lines = out.split("\n") + assert len(lines) == 3 + assert lines[1] == "Upper bound on total bytes = 0" + + def test_who_global(self, capsys): + global x + x = cupy.ones(10) + cupy.who() + out, err = capsys.readouterr() + lines = out.split("\n") + assert lines[-4].split() == ["x", "10", "80", "float64"] + assert lines[-2] == "Upper bound on total bytes = 80" + + 
def test_who_global_multi(self, capsys): + global x + global y + x = cupy.ones(10) + y = cupy.ones(20, dtype=cupy.int32) + cupy.who() + out, err = capsys.readouterr() + lines = out.split("\n") + # depending on the env, the order in which vars are print + # might be different + var_1 = lines[-5].split() + var_2 = lines[-4].split() + if var_1[0] == "x": + assert var_1 == ["x", "10", "80", "float64"] + assert var_2 == ["y", "20", "80", "int32"] + else: + assert var_2 == ["x", "10", "80", "float64"] + assert var_1 == ["y", "20", "80", "int32"] + assert lines[-2] == "Upper bound on total bytes = 160" + + def test_who_dict_arrays(self, capsys): + var_dict = {"x": cupy.ones(10)} + cupy.who(var_dict) + out, err = capsys.readouterr() + lines = out.split("\n") + assert lines[-4].split() == ["x", "10", "80", "float64"] + assert lines[-2] == "Upper bound on total bytes = 80" + + def test_who_dict_empty(self, capsys): + global x + x = cupy.ones(10) + cupy.who({}) + out, err = capsys.readouterr() + lines = out.split("\n") + assert lines[-2] == "Upper bound on total bytes = 0" diff --git a/dpnp/tests/third_party/cupy/padding_tests/test_pad.py b/dpnp/tests/third_party/cupy/padding_tests/test_pad.py index f12eb18a6b97..8a5b2b9d338a 100644 --- a/dpnp/tests/third_party/cupy/padding_tests/test_pad.py +++ b/dpnp/tests/third_party/cupy/padding_tests/test_pad.py @@ -19,7 +19,7 @@ { "array": [numpy.arange(6).reshape([2, 3])], "pad_width": [1, [1, 2], [[1, 2], [3, 4]]], - # mode "mean" is non-exact, so it is tested in a separate class + # mode 'mean' is non-exact, so it is tested in a separate class "mode": [ "constant", "edge", @@ -34,6 +34,7 @@ ) ) class TestPadDefault(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_pad_default(self, xp, dtype): @@ -64,6 +65,7 @@ def f(): ) ) class TestPadDefaultMean(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_almost_equal(decimal=5) def 
test_pad_default(self, xp, dtype): @@ -86,7 +88,7 @@ def f(): @testing.parameterize( - # mode="constant" + # mode='constant' { "array": numpy.arange(6).reshape([2, 3]), "pad_width": 1, @@ -105,7 +107,7 @@ def f(): "mode": "constant", "constant_values": [[3, 4], [5, 6]], }, - # mode="reflect" + # mode='reflect' { "array": numpy.arange(6).reshape([2, 3]), "pad_width": 1, @@ -124,7 +126,7 @@ def f(): "mode": "reflect", "reflect_type": "odd", }, - # mode="symmetric" + # mode='symmetric' { "array": numpy.arange(6).reshape([2, 3]), "pad_width": 1, @@ -143,7 +145,7 @@ def f(): "mode": "symmetric", "reflect_type": "odd", }, - # mode="minimum" + # mode='minimum' { "array": numpy.arange(60).reshape([5, 12]), "pad_width": 1, @@ -168,7 +170,7 @@ def f(): "mode": "minimum", "stat_length": None, }, - # mode="maximum" + # mode='maximum' { "array": numpy.arange(60).reshape([5, 12]), "pad_width": 1, @@ -196,6 +198,7 @@ def f(): ) # Old numpy does not work with multi-dimensional constant_values class TestPad(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_pad(self, xp, dtype): @@ -234,7 +237,7 @@ def f(): @testing.parameterize( - # mode="mean" + # mode='mean' { "array": numpy.arange(60).reshape([5, 12]), "pad_width": 1, @@ -262,6 +265,7 @@ def f(): ) # Old numpy does not work with multi-dimensional constant_values class TestPadMean(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_almost_equal(decimal=5) def test_pad(self, xp, dtype): @@ -289,6 +293,7 @@ def f(): class TestPadNumpybug(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_pad_highdim_default(self, xp, dtype): @@ -302,6 +307,7 @@ def test_pad_highdim_default(self, xp, dtype): class TestPadEmpty(unittest.TestCase): + @testing.with_requires("numpy>=1.17") @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() @@ -314,6 +320,7 @@ def 
test_pad_empty(self, xp, dtype): class TestPadCustomFunction(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_pad_via_func(self, xp, dtype): @@ -327,7 +334,7 @@ def _padwithtens(vector, pad_width, iaxis, kwargs): @testing.parameterize( - # mode="constant" + # mode='constant' {"array": [], "pad_width": 1, "mode": "constant", "constant_values": 3}, {"array": 1, "pad_width": 1, "mode": "constant", "constant_values": 3}, { @@ -342,16 +349,17 @@ def _padwithtens(vector, pad_width, iaxis, kwargs): "mode": "constant", "constant_values": 3, }, - # mode="edge" + # mode='edge' {"array": 1, "pad_width": 1, "mode": "edge"}, {"array": [0, 1, 2, 3], "pad_width": 1, "mode": "edge"}, {"array": [0, 1, 2, 3], "pad_width": [1, 2], "mode": "edge"}, - # mode="reflect" + # mode='reflect' {"array": 1, "pad_width": 1, "mode": "reflect"}, {"array": [0, 1, 2, 3], "pad_width": 1, "mode": "reflect"}, {"array": [0, 1, 2, 3], "pad_width": [1, 2], "mode": "reflect"}, ) class TestPadSpecial(unittest.TestCase): + @testing.numpy_cupy_array_equal(type_check=has_support_aspect64()) def test_pad_special(self, xp): array = xp.array(self.array) @@ -389,12 +397,7 @@ def test_pad_special(self, xp): }, # edge {"array": [], "pad_width": 1, "mode": "edge", "kwargs": {}}, - { - "array": [0, 1, 2, 3], - "pad_width": [-1, 1], - "mode": "edge", - "kwargs": {}, - }, + {"array": [0, 1, 2, 3], "pad_width": [-1, 1], "mode": "edge", "kwargs": {}}, { "array": [0, 1, 2, 3], "pad_width": [[3, 4], [5, 6]], @@ -407,7 +410,7 @@ def test_pad_special(self, xp): "mode": "edge", "kwargs": {"notallowedkeyword": 3}, }, - # mode="reflect" + # mode='reflect' {"array": [], "pad_width": 1, "mode": "reflect", "kwargs": {}}, { "array": [0, 1, 2, 3], @@ -430,6 +433,7 @@ def test_pad_special(self, xp): ) @testing.with_requires("numpy>=1.17") class TestPadValueError(unittest.TestCase): + def test_pad_failure(self): for xp in (numpy, cupy): array = xp.array(self.array) @@ -446,10 
+450,11 @@ def test_pad_failure(self): }, # edge {"array": [0, 1, 2, 3], "pad_width": [], "mode": "edge", "kwargs": {}}, - # mode="reflect" + # mode='reflect' {"array": [0, 1, 2, 3], "pad_width": [], "mode": "reflect", "kwargs": {}}, ) class TestPadTypeError(unittest.TestCase): + def test_pad_failure(self): for xp in (numpy, cupy): array = xp.array(self.array) diff --git a/dpnp/tests/third_party/cupy/random_tests/common_distributions.py b/dpnp/tests/third_party/cupy/random_tests/common_distributions.py new file mode 100644 index 000000000000..b1181e14fbed --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/common_distributions.py @@ -0,0 +1,517 @@ +import functools +import unittest + +import numpy + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing +from dpnp.tests.third_party.cupy.testing import _condition + + +def two_sample_Kolmogorov_Smirnov_test(observed1, observed2): + """Computes the Kolmogorov-Smirnov statistic on 2 samples + + Unlike `scipy.stats.ks_2samp`, the returned p-value is not accurate + for large p. 
+ """ + assert observed1.dtype == observed2.dtype + (n1,) = observed1.shape + (n2,) = observed2.shape + assert n1 >= 100 and n2 >= 100 + observed = numpy.concatenate([observed1, observed2]) + indices = numpy.argsort(observed) + observed = observed[indices] # sort + ds = numpy.cumsum(numpy.where(indices < n1, -n2, n1).astype(numpy.int64)) + assert ds[-1] == 0 + check = numpy.concatenate([observed[:-1] < observed[1:], [True]]) + ds = ds[check] + d_plus = float(ds.max()) / (n1 * n2) + d_minus = -float(ds.min()) / (n1 * n2) + d = max(d_plus, d_minus) + # Approximate p = special.kolmogorov(d * numpy.sqrt(n1 * n2 / (n1 + n2))) + p = min(1.0, 2.0 * numpy.exp(-2.0 * d**2 * n1 * n2 / (n1 + n2))) + return d_plus, d_minus, p + + +class BaseGeneratorTestCase(unittest.TestCase): + + target_method = None + + def get_rng(self, xp, seed): + pass + + def set_rng_seed(self, seed): + pass + + def setUp(self): + self.__seed = testing.generate_seed() + # rng will be a new or old generator API object + self.rng = self.get_rng(cupy, self.__seed) + + def _get_generator_func(self, *args, **kwargs): + assert isinstance( + self.target_method, str + ), "generate_method must be overridden" + f = getattr(self.rng, self.target_method) + return lambda: f(*args, **kwargs) + + def _generate_check_repro(self, func, seed): + # Sample a random array while checking reproducibility + self.set_rng_seed(seed) + x = func() + self.set_rng_seed(seed) + y = func() + testing.assert_array_equal( + x, y, "Randomly generated arrays with the same seed did not match" + ) + return x + + def generate(self, *args, **kwargs): + # Pick one sample from generator. + # Reproducibility is checked by repeating seed-and-sample cycle twice. + func = self._get_generator_func(*args, **kwargs) + return self._generate_check_repro(func, self.__seed) + + def generate_many(self, *args, **kwargs): + # Pick many samples from generator. 
+ # Reproducibility is checked only for the first sample, + # because it's very slow to set seed every time. + _count = kwargs.pop("_count", None) + assert _count is not None, "_count is required" + func = self._get_generator_func(*args, **kwargs) + + if _count == 0: + return [] + + vals = [self._generate_check_repro(func, self.__seed)] + for _ in range(1, _count): + vals.append(func()) + return vals + + def check_ks(self, significance_level, cupy_len=100, numpy_len=1000): + return functools.partial( + self._check_ks, significance_level, cupy_len, numpy_len + ) + + def _check_ks( + self, significance_level, cupy_len, numpy_len, *args, **kwargs + ): + assert "size" in kwargs + + # cupy + func = self._get_generator_func(*args, **kwargs) + vals_cupy = func() + assert vals_cupy.size > 0 + count = 1 + (cupy_len - 1) // vals_cupy.size + vals_cupy = [vals_cupy] + for _ in range(1, count): + vals_cupy.append(func()) + vals_cupy = cupy.stack(vals_cupy).ravel() + + # numpy + kwargs["size"] = numpy_len + dtype = kwargs.pop("dtype", None) + numpy_rng = self.get_rng(numpy, self.__seed) + vals_numpy = getattr(numpy_rng, self.target_method)(*args, **kwargs) + if dtype is not None: + vals_numpy = vals_numpy.astype(dtype, copy=False) + + # test + d_plus, d_minus, p_value = two_sample_Kolmogorov_Smirnov_test( + cupy.asnumpy(vals_cupy), vals_numpy + ) + if p_value < significance_level: + message = """Rejected null hypothesis: +p: %f +D+ (cupy < numpy): %f +D- (cupy > numpy): %f""" % ( + p_value, + d_plus, + d_minus, + ) + raise AssertionError(message) + + +uniform_params = [ + {"low": 1, "high": 10.0, "size": (3, 5)}, + {"low": [1, 2], "high": 3, "size": None}, + {"low": 20, "high": 20.1, "size": 1000}, +] + + +class Uniform: + target_method = "uniform" + + def test_uniform(self): + low = self.low + if isinstance(low, list): + low = cupy.array(low) + high = self.high + if isinstance(high, list): + high = cupy.array(high) + + result = self.generate(low, high, self.size) + assert 
cupy.all(result >= cupy.asarray(low).min())
+        assert cupy.all(result < cupy.asarray(high).max())
+
+    @_condition.repeat_with_success_at_least(10, 3)
+    def test_uniform_ks(self):
+        if isinstance(self.low, list) or isinstance(self.high, list):
+            self.skipTest("Stastical checks only for scalar args")
+        self.check_ks(0.05)(low=self.low, high=self.high, size=2000)
+
+
+beta_params = [
+    {"a": 1.0, "b": 3.0},
+    {"a": 3.0, "b": 3.0},
+    {"a": 3.0, "b": 1.0},
+    {"a": [1.0, 3.0, 5.0, 6.0, 9.0], "b": 7.0},
+    {"a": 5.0, "b": [1.0, 5.0, 8.0, 1.0, 3.0]},
+    {"a": [8.0, 6.0, 2.0, 4.0, 7.0], "b": [3.0, 1.0, 2.0, 8.0, 1.0]},
+]
+
+
+class Beta:
+
+    target_method = "beta"
+
+    def test_beta(self):
+        a = self.a
+        b = self.b
+        if isinstance(self.a, list) or isinstance(self.b, list):
+            a = cupy.array(self.a)
+            b = cupy.array(self.b)
+        self.generate(a, b, size=(3, 5))
+
+    @_condition.repeat_with_success_at_least(10, 3)
+    def test_beta_ks(self):
+        if isinstance(self.a, list) or isinstance(self.b, list):
+            self.skipTest("Stastical checks only for scalar args")
+        self.check_ks(0.05)(a=self.a, b=self.b, size=2000)
+
+
+class StandardExponential:
+
+    target_method = "standard_exponential"
+
+    def test_standard_exponential(self):
+        self.generate(size=(3, 2))
+
+    @testing.slow
+    @_condition.repeat(10)
+    def test_standard_exponential_isfinite(self):
+        x = self.generate(size=10**7)
+        assert cupy.isfinite(x).all()
+
+    @testing.for_dtypes("fd")
+    @_condition.repeat_with_success_at_least(10, 3)
+    def test_standard_exponential_ks(self, dtype):
+        self.check_ks(0.05)(size=2000, dtype=dtype)
+
+
+standard_gamma_params = [{"shape": 0.5}, {"shape": 1.0}, {"shape": 3.0}]
+
+
+class StandardGamma:
+
+    target_method = "standard_gamma"
+
+    def test_standard_gamma(self):
+        self.generate(shape=self.shape, size=(3, 2))
+
+    @testing.for_dtypes("fd")
+    @_condition.repeat_with_success_at_least(10, 3)
+    def test_standard_gamma_ks(self, dtype):
+        self.check_ks(0.05)(shape=self.shape, size=2000, dtype=dtype)
+
+
+standard_normal_params = [ + {"size": None}, + {"size": (1, 2, 3)}, + {"size": 3}, + {"size": (1000, 1000)}, + {"size": (3, 3)}, + {"size": ()}, +] + + +class StandardNormal: + + target_method = "standard_normal" + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_normal_ks(self, dtype): + self.check_ks(0.05)(size=self.size, dtype=dtype) + + +exponential_params = [{"scale": 0.5}, {"scale": 1}, {"scale": 10}] + + +class Exponential: + + target_method = "exponential" + + def test_exponential(self): + self.generate(scale=self.scale, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_exponential_ks(self): + self.check_ks(0.05)(self.scale, size=2000) + + +poisson_params = [{"lam": 1.0}, {"lam": 3.0}, {"lam": 10.0}] + + +class Poisson: + + target_method = "poisson" + + def test_poisson(self): + self.generate(lam=self.lam, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_poisson_ks(self): + self.check_ks(0.05)(lam=self.lam, size=2000) + + def test_poisson_large(self): + self.generate(lam=self.lam, size=(1000, 1000)) + + +gamma_params = [ + {"shape": 0.5, "scale": 0.5}, + {"shape": 1.0, "scale": 0.5}, + {"shape": 3.0, "scale": 0.5}, + {"shape": 0.5, "scale": 1.0}, + {"shape": 1.0, "scale": 1.0}, + {"shape": 3.0, "scale": 1.0}, + {"shape": 0.5, "scale": 3.0}, + {"shape": 1.0, "scale": 3.0}, + {"shape": 3.0, "scale": 3.0}, +] + + +class Gamma: + + target_method = "gamma" + + def test_gamma_1(self): + self.generate(shape=self.shape, scale=self.scale, size=(3, 2)) + + def test_gamma_2(self): + self.generate(shape=self.shape, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_gamma_ks(self): + self.check_ks(0.05)(self.shape, self.scale, size=2000) + + +binomial_params = [ + {"n": 2, "p": 0.5}, + {"n": 5, "p": 0.5}, + {"n": 10, "p": 0.5}, + {"n": 2, "p": 0.1}, + {"n": 5, "p": 0.1}, + {"n": 10, "p": 0.1}, + {"n": 2, "p": 1.0}, + {"n": 2, "p": 1.0}, + {"n": 2, "p": 
1.0}, +] + + +class Binomial: + + target_method = "binomial" + + def test_binomial(self): + self.generate(n=self.n, p=self.p, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_binomial_ks(self): + self.check_ks(0.05)(self.n, self.p, size=2000) + + +geometric_params = [ + {"p": 0.5}, + {"p": 0.1}, + {"p": 1.0}, + {"p": [0.1, 0.5]}, +] + + +class Geometric: + + target_method = "geometric" + + def test_geometric(self): + p = self.p + if not isinstance(self.p, float): + p = cupy.array(self.p) + self.generate(p=p, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_geometric_ks(self): + if not isinstance(self.p, float): + self.skipTest("Statistical checks only for scalar `p`") + self.check_ks(0.05)(p=self.p, size=2000) + + +hypergeometric_params = [ + {"ngood": 5, "nbad": 5, "nsample": 5}, + {"ngood": 10, "nbad": 10, "nsample": 10}, + {"ngood": 100, "nbad": 2, "nsample": 10}, + {"ngood": [0, 5, 8], "nbad": [5, 0, 3], "nsample": [2, 1, 8]}, + {"ngood": [1, 4, 2, 7, 6], "nbad": 5.0, "nsample": [2, 7, 4, 6, 5]}, +] + + +class Hypergeometric: + + target_method = "hypergeometric" + + def test_hypergeometric(self): + ngood = self.ngood + nbad = self.nbad + nsample = self.nsample + if ( + isinstance(self.ngood, list) + or isinstance(self.nbad, list) + or isinstance(self.nsample, list) + ): + ngood = cupy.array(self.ngood) + nbad = cupy.array(self.nbad) + nsample = cupy.array(self.nsample) + self.generate(ngood, nbad, nsample) + + @_condition.repeat_with_success_at_least(10, 3) + def test_hypergeometric_ks(self): + if ( + isinstance(self.ngood, list) + or isinstance(self.nbad, list) + or isinstance(self.nsample, list) + ): + self.skipTest("Stastical checks only for scalar args") + self.check_ks(0.05)(self.ngood, self.nbad, self.nsample, size=2000) + + +power_params = [ + {"a": 0.5}, + {"a": 1}, + {"a": 5}, + {"a": [0.8, 0.7, 1, 2, 5]}, +] + + +class Power: + + target_method = "power" + + def test_power(self): + a = self.a + if 
not isinstance(self.a, float): + a = cupy.array(self.a) + self.generate(a=a) + + @_condition.repeat_with_success_at_least(10, 3) + def test_power_ks(self): + if not isinstance(self.a, float): + self.skipTest("Statistical checks only for scalar `a`") + self.check_ks(0.05)(a=self.a, size=2000) + + +logseries_params = [ + {"p": 0.5}, + {"p": 0.1}, + {"p": 0.9}, + {"p": [0.8, 0.7]}, +] + + +class Logseries: + + target_method = "logseries" + + def test_logseries(self): + p = self.p + if not isinstance(self.p, float): + p = cupy.array(self.p) + self.generate(p=p, size=(3, 2)) + + @_condition.repeat_with_success_at_least(10, 3) + def test_geometric_ks(self): + if not isinstance(self.p, float): + self.skipTest("Statistical checks only for scalar `p`") + self.check_ks(0.05)(p=self.p, size=2000) + + +chisquare_params = [ + {"df": 1.0}, + {"df": 3.0}, + {"df": 10.0}, + {"df": [2, 5, 8]}, +] + + +class Chisquare: + + target_method = "chisquare" + + def test_chisquare(self): + df = self.df + if not isinstance(self.df, float): + df = cupy.array(self.df) + self.generate(df=df) + + @_condition.repeat_with_success_at_least(10, 3) + def test_chisquare_ks(self): + if not isinstance(self.df, float): + self.skipTest("Statistical checks only for scalar `df`") + self.check_ks(0.05)(df=self.df, size=2000) + + +f_params = [ + {"dfnum": 1.0, "dfden": 3.0}, + {"dfnum": 3.0, "dfden": 3.0}, + {"dfnum": 3.0, "dfden": 1.0}, + {"dfnum": [1.0, 3.0, 3.0], "dfden": [3.0, 3.0, 1.0]}, +] + + +class F: + + target_method = "f" + + def test_f(self): + dfnum = self.dfnum + dfden = self.dfden + if isinstance(self.dfnum, list) or isinstance(self.dfden, list): + dfnum = cupy.array(self.dfnum) + dfden = cupy.array(self.dfden) + self.generate(dfnum, dfden) + + @_condition.repeat_with_success_at_least(10, 3) + def test_f_ks(self): + if isinstance(self.dfnum, list) or isinstance(self.dfden, list): + self.skipTest("Stastical checks only for scalar args") + self.check_ks(0.05)(self.dfnum, self.dfden, size=2000) + 
+ +dirichlet_params = [{"alpha": 5}, {"alpha": 1}, {"alpha": [2, 5, 8]}] + + +class Dirichlet: + target_method = "dirichlet" + + def test_dirichlet(self): + alpha = self.alpha + if not isinstance(self.alpha, float): + alpha = cupy.array(self.alpha) + self.generate(alpha=alpha, size=(3, 2)) + + def test_dirichlet_int_shape(self): + alpha = self.alpha + if not isinstance(self.alpha, int): + alpha = cupy.array(self.alpha) + self.generate(alpha=alpha, size=5) + + # TODO(kataoka): add distribution test diff --git a/dpnp/tests/third_party/cupy/random_tests/test_bit_generator.py b/dpnp/tests/third_party/cupy/random_tests/test_bit_generator.py new file mode 100644 index 000000000000..a94202cf19b7 --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_bit_generator.py @@ -0,0 +1,80 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp import random +from dpnp.tests.third_party.cupy import testing + +pytest.skip("bit generator is not supported yet", allow_module_level=True) + + +class BitGeneratorTestCase: + + def setUp(self): + self.seed = testing.generate_seed() + + def check_seed(self, seed): + bg1 = self.bg(seed) + bg2 = self.bg(seed) + bg3 = self.bg(None) + + xs1 = bg1.random_raw(10) + xs2 = bg2.random_raw(10) + xs3 = bg3.random_raw(10) + + # Random state must be reproducible + assert cupy.array_equal(xs1, xs2) + # Random state must be initialized randomly with seed=None + assert not cupy.array_equal(xs1, xs3) + + @testing.for_int_dtypes(no_bool=True) + def test_seed_not_none(self, dtype): + self.check_seed(dtype(0)) + + @testing.for_dtypes([numpy.complex128]) + def test_seed_invalid_type_complex(self, dtype): + with self.assertRaises(TypeError): + self.bg(dtype(0)) + + @testing.for_float_dtypes() + def test_seed_invalid_type_float(self, dtype): + with self.assertRaises(TypeError): + self.bg(dtype(0)) + + def test_array_seed(self): + self.check_seed(numpy.random.randint(0, 2**31, size=10)) + + 
+@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestBitGeneratorXORWOW(BitGeneratorTestCase, unittest.TestCase): + def setUp(self): + super().setUp() + self.bg = random._bit_generator.XORWOW + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestBitGeneratorMRG32k3a(BitGeneratorTestCase, unittest.TestCase): + def setUp(self): + super().setUp() + self.bg = random._bit_generator.MRG32k3a + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip, reason="HIP does not support this" +) +class TestBitGeneratorPhilox4x3210(BitGeneratorTestCase, unittest.TestCase): + def setUp(self): + super().setUp() + self.bg = random._bit_generator.Philox4x3210 diff --git a/dpnp/tests/third_party/cupy/random_tests/test_distributions.py b/dpnp/tests/third_party/cupy/random_tests/test_distributions.py index feda42d6ead5..16ad014645aa 100644 --- a/dpnp/tests/third_party/cupy/random_tests/test_distributions.py +++ b/dpnp/tests/third_party/cupy/random_tests/test_distributions.py @@ -1,32 +1,48 @@ -import unittest - import numpy import pytest import dpnp as cupy from dpnp import random as _distributions +from dpnp.tests.helper import has_support_aspect64, is_win_platform from dpnp.tests.third_party.cupy import testing -from dpnp.tests.third_party.cupy.testing import _helper, _loops -_regular_float_dtypes = (numpy.float64, numpy.float32) +if has_support_aspect64(): + _regular_float_dtypes = (numpy.float64, numpy.float32) +else: + _regular_float_dtypes = (numpy.float32,) _float_dtypes = _regular_float_dtypes + (numpy.float16,) _signed_dtypes = tuple(numpy.dtype(i).type for i in "bhilq") _unsigned_dtypes = tuple(numpy.dtype(i).type for i in "BHILQ") _int_dtypes = _signed_dtypes + _unsigned_dtypes -class 
RandomDistributionsTestCase(unittest.TestCase): - def check_distribution(self, dist_name, params): +class RandomDistributionsTestCase: + def check_distribution(self, dist_name, params, dtype=None): cp_params = {k: cupy.asarray(params[k]) for k in params} np_out = numpy.asarray( - getattr(numpy.random, dist_name)(size=self.shape, **params) + getattr(numpy.random, dist_name)(size=self.shape, **params), dtype ) + dt_kward = {dtype: dtype} if dtype else {} cp_out = getattr(_distributions, dist_name)( - size=self.shape, **cp_params + size=self.shape, **dt_kward, **cp_params ) - - self.assertEqual(cp_out.shape, np_out.shape) - self.assertEqual(cp_out.dtype, np_out.dtype) + if np_out.ndim > 0: + assert cp_out.shape == np_out.shape + if np_out.dtype == numpy.float64 and has_support_aspect64(): + assert cp_out.dtype == np_out.dtype + else: + assert cp_out.dtype.kind == np_out.dtype.kind + + def check_generator_distribution(self, dist_name, params, dtype): + cp_params = {k: cupy.asarray(params[k]) for k in params} + np_gen = numpy.random.default_rng(0) + cp_gen = cupy.random.default_rng(0) + np_out = numpy.asarray( + getattr(np_gen, dist_name)(size=self.shape, **params) + ) + cp_out = getattr(cp_gen, dist_name)(size=self.shape, **cp_params) + assert cp_out.shape == np_out.shape + assert cp_out.dtype == np_out.dtype @testing.parameterize( @@ -35,18 +51,19 @@ def check_distribution(self, dist_name, params): "shape": [(4, 3, 2), (3, 2)], "a_shape": [(), (3, 2)], "b_shape": [(), (3, 2)], + # "dtype": _float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsBeta(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["a_dtype", "b_dtype"] - ) + + @testing.for_dtypes_combination(_float_dtypes, names=["a_dtype", "b_dtype"]) def test_beta(self, a_dtype, b_dtype): a = numpy.full(self.a_shape, 3, dtype=a_dtype) b = numpy.full(self.b_shape, 3, 
dtype=b_dtype) - self.check_distribution("beta", {"a": a, "b": b}) + self.check_distribution("beta", {"a": a, "b": b}, self.dtype) @testing.parameterize( @@ -60,8 +77,9 @@ def test_beta(self, a_dtype, b_dtype): ) ) class TestDistributionsBinomial(RandomDistributionsTestCase): - @_loops.for_signed_dtypes("n_dtype") - @_loops.for_float_dtypes("p_dtype") + + @testing.for_signed_dtypes("n_dtype") + @testing.for_float_dtypes("p_dtype") def test_binomial(self, n_dtype, p_dtype): if numpy.dtype("l") == numpy.int32 and n_dtype == numpy.int64: pytest.skip("n must be able to cast to long") @@ -79,15 +97,16 @@ def test_binomial(self, n_dtype, p_dtype): ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsChisquare(unittest.TestCase): +class TestDistributionsChisquare: + def check_distribution(self, dist_func, df_dtype): df = cupy.full(self.df_shape, 5, dtype=df_dtype) out = dist_func(df, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_float_dtypes("df_dtype") + @testing.for_float_dtypes("df_dtype") + # @testing.for_float_dtypes("dtype") # dtype is not supported def test_chisquare(self, df_dtype): self.check_distribution(_distributions.chisquare, df_dtype) @@ -102,7 +121,10 @@ def test_chisquare(self, df_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsDirichlet(RandomDistributionsTestCase): - @_loops.for_dtypes_combination(_regular_float_dtypes, names=["alpha_dtype"]) + + @testing.for_dtypes_combination( + _float_dtypes, names=["alpha_dtype"] # dtype is not supported + ) def test_dirichlet(self, alpha_dtype): alpha = numpy.ones(self.alpha_shape, dtype=alpha_dtype) self.check_distribution("dirichlet", {"alpha": alpha}) @@ -118,7 +140,9 @@ def test_dirichlet(self, alpha_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class 
TestDistributionsExponential(RandomDistributionsTestCase): - @_loops.for_float_dtypes("scale_dtype") + + # @testing.for_float_dtypes("dtype", no_float16=True) # dtype is not supported + @testing.for_float_dtypes("scale_dtype") def test_exponential(self, scale_dtype): scale = numpy.ones(self.scale_shape, dtype=scale_dtype) self.check_distribution("exponential", {"scale": scale}) @@ -126,9 +150,10 @@ def test_exponential(self, scale_dtype): @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsExponentialError(RandomDistributionsTestCase): + def test_negative_scale(self): scale = cupy.array([2, -1, 3], dtype=numpy.float32) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): cupy.random.exponential(scale) @@ -138,49 +163,75 @@ def test_negative_scale(self): "shape": [(4, 3, 2), (3, 2)], "dfnum_shape": [(), (3, 2)], "dfden_shape": [(), (3, 2)], + # "dtype": _float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsF(unittest.TestCase): - def check_distribution(self, dist_func, dfnum_dtype, dfden_dtype): +class TestDistributionsF: + + def check_distribution(self, dist_func, dfnum_dtype, dfden_dtype, dtype): dfnum = cupy.ones(self.dfnum_shape, dtype=dfnum_dtype) dfden = cupy.ones(self.dfden_shape, dtype=dfden_dtype) out = dist_func(dfnum, dfden, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_float_dtypes("dfnum_dtype") - @_loops.for_float_dtypes("dfden_dtype") + @testing.for_float_dtypes("dfnum_dtype") + @testing.for_float_dtypes("dfden_dtype") def test_f(self, dfnum_dtype, dfden_dtype): - self.check_distribution(_distributions.f, dfnum_dtype, dfden_dtype) + self.check_distribution( + _distributions.f, dfnum_dtype, dfden_dtype, self.dtype + ) 
@testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "shape_shape": [(), (3, 2)], "scale_shape": [(), (3, 2)], + # "dtype": _float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsGamma(unittest.TestCase): - def check_distribution(self, dist_func, shape_dtype, scale_dtype): +class TestDistributionsGamma: + + def check_distribution( + self, dist_func, shape_dtype, scale_dtype, dtype=None + ): shape = cupy.ones(self.shape_shape, dtype=shape_dtype) scale = cupy.ones(self.scale_shape, dtype=scale_dtype) - out = dist_func(shape, scale, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + if dtype is None: + out = dist_func(shape, scale, self.shape) + else: + out = dist_func(shape, scale, self.shape, dtype) + out_shape = self.shape + if self.shape is None: + out_shape = shape.shape if shape.shape != () else scale.shape + if self.shape is not None: + assert out_shape == out.shape + # assert out.dtype == dtype + + @testing.for_dtypes_combination( + _float_dtypes, names=["shape_dtype", "scale_dtype"] + ) + def test_gamma_legacy(self, shape_dtype, scale_dtype): + self.check_distribution( + _distributions.gamma, shape_dtype, scale_dtype, self.dtype + ) - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["shape_dtype", "scale_dtype"] + @pytest.mark.skip("no support of generator yet") + @testing.for_dtypes_combination( + _float_dtypes, names=["shape_dtype", "scale_dtype"] ) - def test_gamma(self, shape_dtype, scale_dtype): - self.check_distribution(_distributions.gamma, shape_dtype, scale_dtype) + def test_gamma_generator(self, shape_dtype, scale_dtype): + self.check_distribution( + cupy.random.default_rng().gamma, shape_dtype, scale_dtype + ) @testing.parameterize( @@ -188,26 +239,29 @@ def test_gamma(self, 
shape_dtype, scale_dtype): { "shape": [(4, 3, 2), (3, 2)], "p_shape": [(), (3, 2)], + # "dtype": _int_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) -class TestDistributionsGeometric(unittest.TestCase): - def check_distribution(self, dist_func, p_dtype): +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +class TestDistributionsGeometric: + + def check_distribution(self, dist_func, p_dtype, dtype): p = 0.5 * cupy.ones(self.p_shape, dtype=p_dtype) out = dist_func(p, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy output dtype is int64, dpnp output is int32 - self.assertEqual(out.dtype, numpy.int64) + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_float_dtypes("p_dtype") + @testing.for_float_dtypes("p_dtype") def test_geometric(self, p_dtype): - self.check_distribution(_distributions.geometric, p_dtype) + self.check_distribution(_distributions.geometric, p_dtype, self.dtype) @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "loc_shape": [(), (3, 2)], "scale_shape": [(), (3, 2)], } @@ -215,8 +269,10 @@ def test_geometric(self, p_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsGumbel(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["loc_dtype", "scale_dtype"] ) def test_gumbel(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) @@ -232,37 +288,48 @@ def test_gumbel(self, loc_dtype, scale_dtype): "nbad_shape": [(), (3, 2)], "nsample_shape": [(), (3, 2)], "nsample_dtype": [numpy.int32, numpy.int64], # to escape timeout + # "dtype": [numpy.int32, numpy.int64], # to escape timeout + "dtype": [None], # no dtype supported } ) ) -class 
TestDistributionsHyperGeometric(unittest.TestCase): +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +class TestDistributionsHyperGeometric: + def check_distribution( - self, dist_func, ngood_dtype, nbad_dtype, nsample_dtype + self, dist_func, ngood_dtype, nbad_dtype, nsample_dtype, dtype ): ngood = cupy.ones(self.ngood_shape, dtype=ngood_dtype) nbad = cupy.ones(self.nbad_shape, dtype=nbad_dtype) nsample = cupy.ones(self.nsample_shape, dtype=nsample_dtype) out = dist_func(ngood, nbad, nsample, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy output dtype is int64, dpnp output is int32 - self.assertEqual(out.dtype, numpy.int64) + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_dtypes_combination( + @testing.for_dtypes_combination( [numpy.int32, numpy.int64], names=["ngood_dtype", "nbad_dtype"] ) def test_hypergeometric(self, ngood_dtype, nbad_dtype): + if ( + is_win_platform() + and numpy.int64 in (self.nsample_dtype, ngood_dtype, nbad_dtype) + and numpy.lib.NumpyVersion(numpy.__version__) < "2.0.0" + ): + pytest.skip("numpy raises TypeError") + self.check_distribution( _distributions.hypergeometric, ngood_dtype, nbad_dtype, self.nsample_dtype, + self.dtype, ) @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "loc_shape": [(), (3, 2)], "scale_shape": [(), (3, 2)], } @@ -270,8 +337,10 @@ def test_hypergeometric(self, ngood_dtype, nbad_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsuLaplace(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["loc_dtype", "scale_dtype"] ) def test_laplace(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) @@ -290,8 +359,10 @@ def test_laplace(self, 
loc_dtype, scale_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsLogistic(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["loc_dtype", "scale_dtype"] ) def test_logistic(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) @@ -302,15 +373,18 @@ def test_logistic(self, loc_dtype, scale_dtype): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], - "mean_shape": [()], - "sigma_shape": [()], + "shape": [(4, 3, 2), (3, 2), None], + "mean_shape": [(), (3, 2)], + "sigma_shape": [(), (3, 2)], } ) ) +@pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsLognormal(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["mean_dtype", "sigma_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["mean_dtype", "sigma_dtype"] ) def test_lognormal(self, mean_dtype, sigma_dtype): mean = numpy.ones(self.mean_shape, dtype=mean_dtype) @@ -326,18 +400,22 @@ def test_lognormal(self, mean_dtype, sigma_dtype): } ) ) +@pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsLogseries(RandomDistributionsTestCase): - @_loops.for_float_dtypes("p_dtype", no_float16=True) + + # @testing.for_dtypes([numpy.int64, numpy.int32], "dtype") # no dtype supported + @testing.for_float_dtypes("p_dtype", no_float16=True) def test_logseries(self, p_dtype): p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) self.check_distribution("logseries", {"p": p}) - @_loops.for_float_dtypes("p_dtype", no_float16=True) + # @testing.for_dtypes([numpy.int64, numpy.int32], "dtype") # no dtype supported + @testing.for_float_dtypes("p_dtype", no_float16=True) 
def test_logseries_for_invalid_p(self, p_dtype): - with self.assertRaises(ValueError): - cp_params = {"p": cupy.zeros(self.p_shape, dtype=p_dtype)} - _distributions.logseries(size=self.shape, **cp_params) - with self.assertRaises(ValueError): + # with pytest.raises(ValueError): # no exception raised by numpy + # cp_params = {"p": cupy.zeros(self.p_shape, dtype=p_dtype)} + # _distributions.logseries(size=self.shape, **cp_params) + with pytest.raises(ValueError): cp_params = {"p": cupy.ones(self.p_shape, dtype=p_dtype)} _distributions.logseries(size=self.shape, **cp_params) @@ -350,26 +428,23 @@ def test_logseries_for_invalid_p(self, p_dtype): } ) ) -class TestDistributionsMultivariateNormal(unittest.TestCase): - def check_distribution(self, dist_func, mean_dtype, cov_dtype): +@pytest.mark.skip("multivariate_normal is not fully supported yet") +class TestDistributionsMultivariateNormal: + + def check_distribution(self, dist_func, mean_dtype, cov_dtype, dtype): mean = cupy.zeros(self.d, dtype=mean_dtype) - cov = cupy.random.normal(size=(self.d, self.d)) - # dpnp.dpnp_array doesn't have .dot - # TODO - # no conversation to ndarray - cov = numpy.array(cov) + cov = cupy.random.normal(size=(self.d, self.d), dtype=cov_dtype) cov = cov.T.dot(cov) - cov = cupy.array(cov) - out = dist_func(mean, cov, self.shape) - self.assertEqual(self.shape + (self.d,), out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) - - @_loops.for_float_dtypes("mean_dtype", no_float16=True) - @_loops.for_float_dtypes("cov_dtype", no_float16=True) - def test_normal(self, mean_dtype, cov_dtype): + out = dist_func(mean, cov, self.shape, dtype=dtype) + assert self.shape + (self.d,) == out.shape + assert out.dtype == dtype + + @testing.for_float_dtypes("dtype", no_float16=True) + @testing.for_float_dtypes("mean_dtype", no_float16=True) + @testing.for_float_dtypes("cov_dtype", no_float16=True) + def test_normal(self, mean_dtype, cov_dtype, dtype): 
self.check_distribution( - _distributions.multivariate_normal, mean_dtype, cov_dtype + _distributions.multivariate_normal, mean_dtype, cov_dtype, dtype ) @@ -379,24 +454,31 @@ def test_normal(self, mean_dtype, cov_dtype): "shape": [(4, 3, 2), (3, 2)], "n_shape": [(), (3, 2)], "p_shape": [(), (3, 2)], + # "dtype": _int_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsNegativeBinomial(RandomDistributionsTestCase): - @_loops.for_float_dtypes("n_dtype") - @_loops.for_float_dtypes("p_dtype") + + @testing.for_float_dtypes("n_dtype") + @testing.for_float_dtypes("p_dtype") def test_negative_binomial(self, n_dtype, p_dtype): n = numpy.full(self.n_shape, 5, dtype=n_dtype) p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution("negative_binomial", {"n": n, "p": p}) + self.check_distribution( + "negative_binomial", {"n": n, "p": p}, self.dtype + ) - @_loops.for_float_dtypes("n_dtype") - @_loops.for_float_dtypes("p_dtype") + @testing.for_float_dtypes("n_dtype") + @testing.for_float_dtypes("p_dtype") def test_negative_binomial_for_noninteger_n(self, n_dtype, p_dtype): n = numpy.full(self.n_shape, 5.5, dtype=n_dtype) p = numpy.full(self.p_shape, 0.5, dtype=p_dtype) - self.check_distribution("negative_binomial", {"n": n, "p": p}) + self.check_distribution( + "negative_binomial", {"n": n, "p": p}, self.dtype + ) @testing.parameterize( @@ -405,31 +487,34 @@ def test_negative_binomial_for_noninteger_n(self, n_dtype, p_dtype): "shape": [(4, 3, 2), (3, 2)], "df_shape": [(), (3, 2)], "nonc_shape": [(), (3, 2)], + # "dtype": _int_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsNoncentralChisquare(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( + + @testing.for_dtypes_combination( _regular_float_dtypes, names=["df_dtype", "nonc_dtype"] ) def 
test_noncentral_chisquare(self, df_dtype, nonc_dtype): df = numpy.full(self.df_shape, 1, dtype=df_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=nonc_dtype) self.check_distribution( - "noncentral_chisquare", {"df": df, "nonc": nonc} + "noncentral_chisquare", {"df": df, "nonc": nonc}, self.dtype ) - @_loops.for_float_dtypes("param_dtype", no_float16=True) + @testing.for_float_dtypes("param_dtype", no_float16=True) def test_noncentral_chisquare_for_invalid_params(self, param_dtype): df = cupy.full(self.df_shape, -1, dtype=param_dtype) nonc = cupy.full(self.nonc_shape, 1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.noncentral_chisquare(df, nonc, size=self.shape) df = cupy.full(self.df_shape, 1, dtype=param_dtype) nonc = cupy.full(self.nonc_shape, -1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.noncentral_chisquare(df, nonc, size=self.shape) @@ -440,12 +525,15 @@ def test_noncentral_chisquare_for_invalid_params(self, param_dtype): "dfnum_shape": [(), (3, 2)], "dfden_shape": [(), (3, 2)], "nonc_shape": [(), (3, 2)], + # "dtype": _int_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsNoncentralF(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( + + @testing.for_dtypes_combination( _regular_float_dtypes, names=["dfnum_dtype", "dfden_dtype", "nonc_dtype"], ) @@ -454,34 +542,36 @@ def test_noncentral_f(self, dfnum_dtype, dfden_dtype, nonc_dtype): dfden = numpy.full(self.dfden_shape, 1, dtype=dfden_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=nonc_dtype) self.check_distribution( - "noncentral_f", {"dfnum": dfnum, "dfden": dfden, "nonc": nonc} + "noncentral_f", + {"dfnum": dfnum, "dfden": dfden, "nonc": nonc}, + self.dtype, ) - @_loops.for_float_dtypes("param_dtype", no_float16=True) + @testing.for_float_dtypes("param_dtype", 
no_float16=True) def test_noncentral_f_for_invalid_params(self, param_dtype): dfnum = numpy.full(self.dfnum_shape, -1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, 1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) dfnum = numpy.full(self.dfnum_shape, 1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, -1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, 1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) dfnum = numpy.full(self.dfnum_shape, 1, dtype=param_dtype) dfden = numpy.full(self.dfden_shape, 1, dtype=param_dtype) nonc = numpy.full(self.nonc_shape, -1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.noncentral_f(dfnum, dfden, nonc, size=self.shape) @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "loc_shape": [(), (3, 2)], "scale_shape": [(), (3, 2)], } @@ -489,8 +579,10 @@ def test_noncentral_f_for_invalid_params(self, param_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsNormal(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["loc_dtype", "scale_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["loc_dtype", "scale_dtype"] ) def test_normal(self, loc_dtype, scale_dtype): loc = numpy.ones(self.loc_shape, dtype=loc_dtype) @@ -501,21 +593,23 @@ def test_normal(self, loc_dtype, scale_dtype): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "a_shape": [(), (3, 2)], } ) ) 
@pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsPareto(unittest.TestCase): +class TestDistributionsPareto: + def check_distribution(self, dist_func, a_dtype): a = cupy.ones(self.a_shape, dtype=a_dtype) out = dist_func(a, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + if self.shape is not None: + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_float_dtypes("a_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def test_pareto(self, a_dtype): self.check_distribution(_distributions.pareto, a_dtype) @@ -523,23 +617,49 @@ def test_pareto(self, a_dtype): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "lam_shape": [(), (3, 2)], } ) ) -class TestDistributionsPoisson(unittest.TestCase): - def check_distribution(self, dist_func, lam_dtype): - lam = cupy.full(self.lam_shape, 5, dtype=lam_dtype) - out = dist_func(lam, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy output dtype is int64, dpnp output is int32 - self.assertEqual(out.dtype, numpy.int64) +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +class TestDistributionsPoisson: - @_loops.for_float_dtypes("lam_dtype") - def test_poisson(self, lam_dtype): + def check_distribution(self, dist_func, lam_dtype, dtype=None): + lam = cupy.full(self.lam_shape, 5, dtype=lam_dtype) + if dtype is not None: + out = dist_func(lam, self.shape, dtype) + assert out.dtype == dtype + else: + out = dist_func(lam, self.shape) + if self.shape is not None: + assert self.shape == out.shape + # else: + # assert lam.shape == out.shape + + # @testing.for_int_dtypes("dtype") # no dtype supported + @testing.for_float_dtypes("lam_dtype") + def test_poisson_legacy(self, lam_dtype): self.check_distribution(_distributions.poisson, lam_dtype) + 
@pytest.mark.skip("no support of generator yet") + @testing.for_float_dtypes("lam_dtype") + def test_poisson_generator(self, lam_dtype): + self.check_distribution(cupy.random.default_rng(0).poisson, lam_dtype) + + +class TestDistributionsPoissonInvalid: + + @pytest.mark.skip("no support of generator yet") + def test_none_lam_generator(self): + with pytest.raises(TypeError): + cupy.random.default_rng(0).poisson(None) + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") + def test_none_lam_legacy(self): + with pytest.raises(TypeError): + _distributions.poisson(None) + @testing.parameterize( *testing.product( @@ -549,16 +669,20 @@ def test_poisson(self, lam_dtype): } ) ) +@pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsPower(RandomDistributionsTestCase): - @_loops.for_float_dtypes("a_dtype") + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def test_power(self, a_dtype): a = numpy.full(self.a_shape, 0.5, dtype=a_dtype) self.check_distribution("power", {"a": a}) - @_loops.for_float_dtypes("a_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def test_power_for_negative_a(self, a_dtype): a = numpy.full(self.a_shape, -0.5, dtype=a_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): cp_params = {"a": cupy.asarray(a)} getattr(_distributions, "power")(size=self.shape, **cp_params) @@ -566,27 +690,31 @@ def test_power_for_negative_a(self, a_dtype): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "scale_shape": [(), (3, 2)], } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsRayleigh(RandomDistributionsTestCase): - @_loops.for_float_dtypes("scale_dtype") + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("scale_dtype") def 
test_rayleigh(self, scale_dtype): scale = numpy.full(self.scale_shape, 3, dtype=scale_dtype) self.check_distribution("rayleigh", {"scale": scale}) - @_loops.for_float_dtypes("scale_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("scale_dtype") def test_rayleigh_for_zero_scale(self, scale_dtype): scale = numpy.zeros(self.scale_shape, dtype=scale_dtype) self.check_distribution("rayleigh", {"scale": scale}) - @_loops.for_float_dtypes("scale_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("scale_dtype") def test_rayleigh_for_negative_scale(self, scale_dtype): scale = numpy.full(self.scale_shape, -0.5, dtype=scale_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): cp_params = {"scale": cupy.asarray(scale)} _distributions.rayleigh(size=self.shape, **cp_params) @@ -599,6 +727,8 @@ def test_rayleigh_for_negative_scale(self, scale_dtype): ) ) class TestDistributionsStandardCauchy(RandomDistributionsTestCase): + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported def test_standard_cauchy(self): self.check_distribution("standard_cauchy", {}) @@ -606,11 +736,13 @@ def test_standard_cauchy(self): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], } ) ) class TestDistributionsStandardExponential(RandomDistributionsTestCase): + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported def test_standard_exponential(self): self.check_distribution("standard_exponential", {}) @@ -618,27 +750,53 @@ def test_standard_exponential(self): @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], "shape_shape": [(), (3, 2)], } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsStandardGamma(RandomDistributionsTestCase): - 
@_loops.for_float_dtypes("shape_dtype") - def test_standard_gamma(self, shape_dtype): + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("shape_dtype") + def test_standard_gamma_legacy(self, shape_dtype): shape = numpy.ones(self.shape_shape, dtype=shape_dtype) self.check_distribution("standard_gamma", {"shape": shape}) + @pytest.mark.skip("no support of generator yet") + @testing.for_float_dtypes("dtype", no_float16=True) + @testing.for_float_dtypes("shape_dtype") + def test_standard_gamma_generator(self, shape_dtype, dtype): + shape = numpy.ones(self.shape_shape, dtype=shape_dtype) + self.check_generator_distribution( + "standard_gamma", {"shape": shape}, dtype + ) + + +class TestDistributionsStandardGammaInvalid(RandomDistributionsTestCase): + + @pytest.mark.skip("no support of generator yet") + def test_none_shape_generator(self): + with pytest.raises(TypeError): + cupy.random.default_rng(0).standard_gamma(None) + + @pytest.mark.usefixtures("allow_fall_back_on_numpy") + def test_none_shape_legacy(self): + with pytest.raises(TypeError): + _distributions.standard_gamma(None) + @testing.parameterize( *testing.product( { - "shape": [(4, 3, 2), (3, 2)], + "shape": [(4, 3, 2), (3, 2), None], } ) ) class TestDistributionsStandardNormal(RandomDistributionsTestCase): + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported def test_standard_normal(self): self.check_distribution("standard_normal", {}) @@ -652,15 +810,16 @@ def test_standard_normal(self): ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsStandardT(unittest.TestCase): +class TestDistributionsStandardT: + def check_distribution(self, dist_func, df_dtype): df = cupy.ones(self.df_shape, dtype=df_dtype) out = dist_func(df, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + assert self.shape == out.shape + # 
assert out.dtype == dtype - @_loops.for_float_dtypes("df_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("df_dtype") def test_standard_t(self, df_dtype): self.check_distribution(_distributions.standard_t, df_dtype) @@ -672,12 +831,15 @@ def test_standard_t(self, df_dtype): "left_shape": [(), (3, 2)], "mode_shape": [(), (3, 2)], "right_shape": [(), (3, 2)], + # "dtype": _regular_float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsTriangular(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( + + @testing.for_dtypes_combination( _regular_float_dtypes, names=["left_dtype", "mode_dtype", "right_dtype"] ) def test_triangular(self, left_dtype, mode_dtype, right_dtype): @@ -685,27 +847,29 @@ def test_triangular(self, left_dtype, mode_dtype, right_dtype): mode = numpy.full(self.mode_shape, 0, dtype=mode_dtype) right = numpy.full(self.right_shape, 2, dtype=right_dtype) self.check_distribution( - "triangular", {"left": left, "mode": mode, "right": right} + "triangular", + {"left": left, "mode": mode, "right": right}, + self.dtype, ) - @_loops.for_float_dtypes("param_dtype", no_float16=True) + @testing.for_float_dtypes("param_dtype", no_float16=True) def test_triangular_for_invalid_params(self, param_dtype): left = cupy.full(self.left_shape, 1, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, 2, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.triangular(left, mode, right, size=self.shape) left = cupy.full(self.left_shape, -2, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, -1, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.triangular(left, mode, right, 
size=self.shape) left = cupy.full(self.left_shape, 0, dtype=param_dtype) mode = cupy.full(self.mode_shape, 0, dtype=param_dtype) right = cupy.full(self.right_shape, 0, dtype=param_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): _distributions.triangular(left, mode, right, size=self.shape) @@ -720,8 +884,10 @@ def test_triangular_for_invalid_params(self, param_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsUniform(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["low_dtype", "high_dtype"] + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_dtypes_combination( + _float_dtypes, names=["low_dtype", "high_dtype"] ) def test_uniform(self, low_dtype, high_dtype): low = numpy.ones(self.low_shape, dtype=low_dtype) @@ -735,24 +901,28 @@ def test_uniform(self, low_dtype, high_dtype): "shape": [(4, 3, 2), (3, 2)], "mu_shape": [(), (3, 2)], "kappa_shape": [(), (3, 2)], + # "dtype": _float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestDistributionsVonmises(unittest.TestCase): - def check_distribution(self, dist_func, mu_dtype, kappa_dtype): +class TestDistributionsVonmises: + + def check_distribution(self, dist_func, mu_dtype, kappa_dtype, dtype): mu = cupy.ones(self.mu_shape, dtype=mu_dtype) kappa = cupy.ones(self.kappa_shape, dtype=kappa_dtype) out = dist_func(mu, kappa, self.shape) - self.assertEqual(self.shape, out.shape) - # numpy and dpdp output dtype is float64 - self.assertEqual(out.dtype, numpy.float64) + assert self.shape == out.shape + # assert out.dtype == dtype - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["mu_dtype", "kappa_dtype"] + @testing.for_dtypes_combination( + _float_dtypes, names=["mu_dtype", "kappa_dtype"] ) def test_vonmises(self, mu_dtype, kappa_dtype): - 
self.check_distribution(_distributions.vonmises, mu_dtype, kappa_dtype) + self.check_distribution( + _distributions.vonmises, mu_dtype, kappa_dtype, self.dtype + ) @testing.parameterize( @@ -761,18 +931,23 @@ def test_vonmises(self, mu_dtype, kappa_dtype): "shape": [(4, 3, 2), (3, 2)], "mean_shape": [(), (3, 2)], "scale_shape": [(), (3, 2)], + # "dtype": _regular_float_dtypes, # to escape timeout + "dtype": [None], # no dtype supported } ) ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsWald(RandomDistributionsTestCase): - @_loops.for_dtypes_combination( - _regular_float_dtypes, names=["mean_dtype", "scale_dtype"] + + @testing.for_dtypes_combination( + _float_dtypes, names=["mean_dtype", "scale_dtype"] ) def test_wald(self, mean_dtype, scale_dtype): mean = numpy.full(self.mean_shape, 3, dtype=mean_dtype) scale = numpy.full(self.scale_shape, 3, dtype=scale_dtype) - self.check_distribution("wald", {"mean": mean, "scale": scale}) + self.check_distribution( + "wald", {"mean": mean, "scale": scale}, self.dtype + ) @testing.parameterize( @@ -785,20 +960,24 @@ def test_wald(self, mean_dtype, scale_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsWeibull(RandomDistributionsTestCase): - @_loops.for_float_dtypes("a_dtype") + + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def test_weibull(self, a_dtype): a = numpy.ones(self.a_shape, dtype=a_dtype) self.check_distribution("weibull", {"a": a}) - @_loops.for_float_dtypes("a_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def test_weibull_for_inf_a(self, a_dtype): a = numpy.full(self.a_shape, numpy.inf, dtype=a_dtype) self.check_distribution("weibull", {"a": a}) - @_loops.for_float_dtypes("a_dtype") + # @testing.for_float_dtypes("dtype", no_float16=True) # no dtype supported + @testing.for_float_dtypes("a_dtype") def 
test_weibull_for_negative_a(self, a_dtype): a = numpy.full(self.a_shape, -0.5, dtype=a_dtype) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): cp_params = {"a": cupy.asarray(a)} getattr(_distributions, "weibull")(size=self.shape, **cp_params) @@ -813,8 +992,9 @@ def test_weibull_for_negative_a(self, a_dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestDistributionsZipf(RandomDistributionsTestCase): - @_loops.for_dtypes([numpy.int32, numpy.int64], "dtype") - @_loops.for_float_dtypes("a_dtype") - def test_zipf(self, a_dtype, dtype): + + # @testing.for_dtypes([numpy.int32, numpy.int64], "dtype") # no dtype supported + @testing.for_float_dtypes("a_dtype") + def test_zipf(self, a_dtype): a = numpy.full(self.a_shape, 2, dtype=a_dtype) self.check_distribution("zipf", {"a": a}) diff --git a/dpnp/tests/third_party/cupy/random_tests/test_generator.py b/dpnp/tests/third_party/cupy/random_tests/test_generator.py new file mode 100644 index 000000000000..7f8ca2f60cbf --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_generator.py @@ -0,0 +1,1293 @@ +import functools +import os +import threading +import unittest + +import numpy +import pytest + +import dpnp as cupy + +# from cupy import cuda +# from cupy.cuda import runtime +# from cupy.random import _generator +from dpnp.tests.third_party.cupy import testing +from dpnp.tests.third_party.cupy.testing import _condition, _hypothesis + +from . import common_distributions + +pytest.skip("random.generator() is not supported yet", allow_module_level=True) + + +def numpy_cupy_equal_continuous_distribution(significance_level, name="xp"): + """Decorator that tests the distributions of NumPy samples and CuPy ones. + + Args: + significance_level (float): The test fails if p-value is lower than + this argument. + name(str): Argument name whose value is either + ``numpy`` or ``cupy`` module. 
+ + Decorated test fixture is required to return samples from the same + distribution even if ``xp`` is ``numpy`` or ``cupy``. + + """ + + def decorator(impl): + @functools.wraps(impl) + def test_func(self, *args, **kw): + kw[name] = cupy + cupy_result = impl(self, *args, **kw) + + kw[name] = numpy + numpy_result = impl(self, *args, **kw) + + assert cupy_result is not None + assert numpy_result is not None + d_plus, d_minus, p_value = ( + common_distributions.two_sample_Kolmogorov_Smirnov_test( + cupy.asnumpy(cupy_result), numpy_result + ) + ) + if p_value < significance_level: + message = """Rejected null hypothesis: +p: %f +D+ (cupy < numpy): %f +D- (cupy > numpy): %f""" % ( + p_value, + d_plus, + d_minus, + ) + raise AssertionError(message) + + return test_func + + return decorator + + +def _get_size(size): + # CuPy returns an ndarray of shape () even if size=None. + # cf. NumPy returns a Python scalar if size=None. + if size is None: + return () + return cupy._core.get_size(size) + + +class RandomGeneratorTestCase(common_distributions.BaseGeneratorTestCase): + + target_method = None + + def get_rng(self, xp, seed): + return xp.random.RandomState(seed=seed) + + def set_rng_seed(self, seed): + self.rng.seed(seed) + + +def _xp_random(xp, method_name): + method = getattr(xp.random.RandomState(), method_name) + if xp == cupy: + return method + + def f(*args, **kwargs): + dtype = kwargs.pop("dtype", None) + ret = method(*args, **kwargs) + if dtype is not None: + ret = ret.astype(dtype, copy=False) + return ret + + return f + + +@testing.fix_random() +class TestRandomState(unittest.TestCase): + + def setUp(self): + self.rs = _generator.RandomState(seed=testing.generate_seed()) + + def check_seed(self, seed): + rs = self.rs + + rs.seed(seed) + xs1 = [rs.uniform() for _ in range(100)] + + rs.seed(seed) + xs2 = [rs.uniform() for _ in range(100)] + + rs.seed(seed) + rs.seed(None) + xs3 = [rs.uniform() for _ in range(100)] + + # Random state must be reproducible + assert 
 xs1 == xs2 + # Random state must be initialized randomly with seed=None + assert xs1 != xs3 + + @testing.for_int_dtypes() + def test_seed_not_none(self, dtype): + self.check_seed(dtype(0)) + + @testing.for_dtypes([numpy.complex128]) + def test_seed_invalid_type_complex(self, dtype): + with self.assertRaises(TypeError): + self.rs.seed(dtype(0)) + + @testing.for_float_dtypes() + def test_seed_invalid_type_float(self, dtype): + with self.assertRaises(TypeError): + self.rs.seed(dtype(0)) + + def test_array_seed(self): + self.check_seed(numpy.random.randint(0, 2**31, size=40)) + + def test_methods(self): + methods = [ + cuda.curand.CURAND_RNG_PSEUDO_DEFAULT, + cuda.curand.CURAND_RNG_PSEUDO_MRG32K3A, + cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937, + cupy.cuda.curand.CURAND_RNG_PSEUDO_PHILOX4_32_10, + cupy.cuda.curand.CURAND_RNG_PSEUDO_MTGP32, + cupy.cuda.curand.CURAND_RNG_PSEUDO_XORWOW, + ] + + for method in methods: + if ( + runtime.is_hip + and method == cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937 + ): + # hipRAND fails for MT19937 with the status code 1000, + # HIPRAND_STATUS_NOT_IMPLEMENTED. We use `pytest.raises` here + # so that we will be able to find it once hipRAND implement + # MT19937 as the imperative `pytest.xfail` immediately rewinds + # the control flow and does not run the test. + with pytest.raises(KeyError) as e: + rs = cupy.random.RandomState(method=method) + assert e.value.args == (1000,) + continue + rs = cupy.random.RandomState(method=method) + rs.normal() + + +@testing.parameterize(*common_distributions.beta_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestBeta(common_distributions.Beta, RandomGeneratorTestCase): + pass + + +@testing.parameterize( + {"n": 5, "p": 0.5}, + {"n": 5, "p": 0.0}, + {"n": 5, "p": 1.0}, +) +@testing.fix_random() +class TestBinomial(RandomGeneratorTestCase): + # TODO(niboshi): + # Test soundness of distribution. + # Currently only reproducibility is checked.
+ + target_method = "binomial" + + def test_binomial(self): + self.generate(n=self.n, p=self.p, size=(3, 2)) + + +@testing.parameterize(*common_distributions.chisquare_params) +@testing.fix_random() +class TestChisquare(common_distributions.Chisquare, RandomGeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.dirichlet_params) +@testing.fix_random() +class TestDirichlet(common_distributions.Dirichlet, RandomGeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.exponential_params) +@testing.fix_random() +class TestExponential( + common_distributions.Exponential, RandomGeneratorTestCase +): + pass + + +@testing.parameterize(*common_distributions.f_params) +@testing.fix_random() +class TestF(common_distributions.F, RandomGeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.gamma_params) +@testing.fix_random() +class TestGamma(common_distributions.Gamma, RandomGeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.geometric_params) +@testing.fix_random() +class TestGeometric(common_distributions.Geometric, RandomGeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.hypergeometric_params) +@testing.fix_random() +class TestHypergeometric( + common_distributions.Hypergeometric, RandomGeneratorTestCase +): + pass + + +@testing.fix_random() +class TestLaplace(RandomGeneratorTestCase): + + target_method = "laplace" + + def test_laplace_1(self): + self.generate() + + def test_laplace_2(self): + self.generate(0.0, 1.0, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_laplace_ks_1(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_laplace_ks_2(self, dtype): + self.check_ks(0.05)(2.3, 4.5, size=2000, dtype=dtype) + + +@testing.fix_random() +class TestLogistic(RandomGeneratorTestCase): + + target_method = "logistic" + 
+ def test_logistic_1(self): + self.generate() + + def test_logistic_2(self): + self.generate(0.0, 1.0, size=(3, 2)) + + @testing.slow + @_condition.repeat(10) + def test_standard_logistic_isfinite(self): + x = self.generate(size=10**7) + assert cupy.isfinite(x).all() + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_logistic_ks_1(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_logistic_ks_2(self, dtype): + self.check_ks(0.05)(2.3, 4.5, size=2000, dtype=dtype) + + +@testing.parameterize( + *[ + {"args": (0.0, 1.0), "size": None}, + {"args": (10.0, 20.0), "size": None}, + {"args": (0.0, 1.0), "size": 10}, + {"args": (0.0, 1.0), "size": (1, 2, 3)}, + {"args": (0.0, 1.0), "size": 3}, + {"args": (0.0, 1.0), "size": (3, 3)}, + {"args": (0.0, 1.0), "size": ()}, + ] +) +@testing.fix_random() +class TestLogNormal(RandomGeneratorTestCase): + + target_method = "lognormal" + + def check_lognormal(self, dtype): + vals = self.generate_many( + self.args[0], self.args[1], self.size, dtype, _count=10 + ) + + shape = _get_size(self.size) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype == dtype + assert val.shape == shape + assert (0 <= val).all() + + def test_lognormal_float(self): + self.check_lognormal(float) + + def test_lognormal_float32(self): + self.check_lognormal(numpy.float32) + + def test_lognormal_float64(self): + self.check_lognormal(numpy.float64) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_lognormal_ks(self, dtype): + self.check_ks(0.05)(*self.args, size=self.size, dtype=dtype) + + +@testing.parameterize(*common_distributions.logseries_params) +@testing.fix_random() +class TestLogseries(common_distributions.Logseries, RandomGeneratorTestCase): + pass + + +@testing.parameterize( + *[ + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": None, 
+ "tol": 1e-6, + }, + { + "args": ([10.0, 10.0], [[20.0, 10.0], [10.0, 20.0]]), + "size": None, + "tol": 1e-6, + }, + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": 10, + "tol": 1e-6, + }, + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": (1, 2, 3), + "tol": 1e-6, + }, + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": 3, + "tol": 1e-6, + }, + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": (3, 3), + "tol": 1e-6, + }, + { + "args": ([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]]), + "size": (), + "tol": 1e-6, + }, + ] +) +@testing.fix_random() +class TestMultivariateNormal(RandomGeneratorTestCase): + + target_method = "multivariate_normal" + + def check_multivariate_normal(self, dtype): + vals = self.generate_many( + mean=self.args[0], + cov=self.args[1], + size=self.size, + tol=self.tol, + dtype=dtype, + _count=10, + ) + + shape = _get_size(self.size) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype == dtype + assert val.shape == shape + (2,) + + def test_multivariate_normal_float32(self): + self.check_multivariate_normal(numpy.float32) + + def test_multivariate_normal_float64(self): + self.check_multivariate_normal(numpy.float64) + + # TODO(kataoka): add distribution test + + +@testing.parameterize( + {"n": 5, "p": 0.5}, +) +@testing.fix_random() +class TestNegativeBinomial(RandomGeneratorTestCase): + target_method = "negative_binomial" + + def test_negative_binomial(self): + self.generate(n=self.n, p=self.p, size=(3, 2)) + + # TODO(kataoka): add distribution test + + +@testing.parameterize( + {"df": 1.5, "nonc": 2.0}, + {"df": 2.0, "nonc": 0.0}, +) +@testing.fix_random() +class TestNoncentralChisquare(RandomGeneratorTestCase): + + target_method = "noncentral_chisquare" + + def test_noncentral_chisquare(self): + self.generate(df=self.df, nonc=self.nonc, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_noncentral_chisquare_ks(self, 
dtype): + self.check_ks(0.05)(self.df, self.nonc, size=2000, dtype=dtype) + + +@testing.parameterize( + {"dfnum": 2.0, "dfden": 3.0, "nonc": 4.0}, + {"dfnum": 2.5, "dfden": 1.5, "nonc": 0.0}, +) +@testing.fix_random() +class TestNoncentralF(RandomGeneratorTestCase): + + target_method = "noncentral_f" + + def test_noncentral_f(self): + self.generate( + dfnum=self.dfnum, dfden=self.dfden, nonc=self.nonc, size=(3, 2) + ) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_noncentral_f_ks(self, dtype): + self.check_ks(0.05)( + self.dfnum, self.dfden, self.nonc, size=2000, dtype=dtype + ) + + +@testing.parameterize( + *[ + {"args": (0.0, 1.0), "size": None}, + {"args": (10.0, 20.0), "size": None}, + {"args": (0.0, 1.0), "size": 10}, + {"args": (0.0, 1.0), "size": (1, 2, 3)}, + {"args": (0.0, 1.0), "size": 3}, + {"args": (0.0, 1.0), "size": (3, 3)}, + {"args": (0.0, 1.0), "size": ()}, + ] +) +@testing.fix_random() +class TestNormal(RandomGeneratorTestCase): + + target_method = "normal" + + def check_normal(self, dtype): + vals = self.generate_many( + self.args[0], self.args[1], self.size, dtype, _count=10 + ) + + shape = _get_size(self.size) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype == dtype + assert val.shape == shape + + def test_normal_float32(self): + self.check_normal(numpy.float32) + + def test_normal_float64(self): + self.check_normal(numpy.float64) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_normal_ks(self, dtype): + self.check_ks(0.05)(*self.args, size=self.size, dtype=dtype) + + +@testing.parameterize( + {"a": 1.0}, + {"a": 3.0}, + {"a": 10.0}, +) +@testing.fix_random() +class TestPareto(RandomGeneratorTestCase): + + target_method = "pareto" + + def test_pareto(self): + self.generate(a=self.a, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_pareto_ks(self, dtype): + 
self.check_ks(0.05)(a=self.a, size=2000, dtype=dtype) + + +@testing.parameterize(*common_distributions.poisson_params) +@testing.fix_random() +class TestPoisson(common_distributions.Poisson, RandomGeneratorTestCase): + pass + + +@testing.parameterize( + {"df": 1.0}, + {"df": 3.0}, + {"df": 10.0}, +) +@testing.fix_random() +class TestStandardT(RandomGeneratorTestCase): + + target_method = "standard_t" + + def test_standard_t(self): + self.generate(df=self.df, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_standard_t_ks(self, dtype): + self.check_ks(0.05)(df=self.df, size=2000, dtype=dtype) + + +@testing.parameterize( + *[ + {"size": None}, + {"size": 10}, + {"size": (1, 2, 3)}, + {"size": 3}, + {"size": ()}, + ] +) +@testing.fix_random() +class TestRandomSample(unittest.TestCase): + + def setUp(self): + self.rs = _generator.RandomState(seed=testing.generate_seed()) + + def check_random_sample(self, dtype): + vals = [self.rs.random_sample(self.size, dtype) for _ in range(10)] + + shape = _get_size(self.size) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype == dtype + assert val.shape == shape + assert (0 <= val).all() + assert (val < 1).all() + + def test_random_sample_float32(self): + self.check_random_sample(numpy.float32) + + def test_random_sample_float64(self): + self.check_random_sample(numpy.float64) + + +@testing.fix_random() +class TestRandomSampleDistrib(unittest.TestCase): + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + @numpy_cupy_equal_continuous_distribution(0.05) + def test_random_sample_ks(self, xp, dtype): + return _xp_random(xp, "random_sample")(size=2000, dtype=dtype) + + +@testing.fix_random() +class TestRandAndRandN(unittest.TestCase): + + def setUp(self): + self.rs = _generator.RandomState(seed=testing.generate_seed()) + + def test_rand_invalid_argument(self): + with self.assertRaises(TypeError): + self.rs.rand(1, 2, 3, 
unnecessary="unnecessary_argument") + + def test_randn_invalid_argument(self): + with self.assertRaises(TypeError): + self.rs.randn(1, 2, 3, unnecessary="unnecessary_argument") + + +@testing.parameterize(*common_distributions.power_params) +@testing.fix_random() +class TestPower(common_distributions.Power, RandomGeneratorTestCase): + pass + + +@testing.parameterize( + {"scale": 1.0}, + {"scale": 3.0}, +) +@testing.fix_random() +class TestRayleigh(RandomGeneratorTestCase): + + target_method = "rayleigh" + + def test_rayleigh(self): + self.generate(scale=self.scale, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_rayleigh_ks(self, dtype): + self.check_ks(0.05)(scale=self.scale, size=2000, dtype=dtype) + + +@testing.fix_random() +class TestStandardCauchy(RandomGeneratorTestCase): + + target_method = "standard_cauchy" + + def test_standard_cauchy(self): + self.generate(size=(3, 2)) + + @testing.slow + @_condition.repeat(10) + def test_standard_cauchy_isfinite(self): + x = self.generate(size=10**7) + assert cupy.isfinite(x).all() + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_standard_cauchy_ks(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + +@testing.parameterize(*common_distributions.standard_gamma_params) +@testing.fix_random() +class TestStandardGamma( + common_distributions.StandardGamma, RandomGeneratorTestCase +): + pass + + +@testing.fix_random() +class TestInterval(RandomGeneratorTestCase): + + target_method = "_interval" + + def test_zero(self): + shape = (2, 3) + vals = self.generate_many(0, shape, _count=10) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape == shape + assert (val == 0).all() + + def test_shape_zero(self): + mx = 10 + vals = self.generate_many(mx, None, _count=10) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape 
== () + assert (0 <= val).all() + assert (val <= mx).all() + # TODO(niboshi): Distribution test + + def test_shape_one_dim(self): + mx = 10 + size = 20 + vals = self.generate_many(mx, size, _count=10) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape == (size,) + assert (0 <= val).all() + assert (val <= mx).all() + # TODO(niboshi): Distribution test + + def test_shape_multi_dim(self): + mx = 10 + shape = (1, 2) + vals = self.generate_many(mx, shape, _count=10) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape == shape + assert (0 <= val).all() + assert (val <= mx).all() + # TODO(niboshi): Distribution test + + def test_bound_1(self): + vals = self.generate_many(10, (2, 3), _count=10) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape == (2, 3) + assert (0 <= val).all() + assert (val <= 10).all() + + def test_bound_2(self): + vals = self.generate_many(2, None, _count=20) + for val in vals: + assert isinstance(val, cupy.ndarray) + assert val.dtype.kind in "iu" + assert val.shape == () + assert (0 <= val).all() + assert (val <= 2).all() + + @_condition.repeat(3, 10) + def test_goodness_of_fit(self): + mx = 5 + trial = 300 + vals = self.generate_many(mx, None, _count=trial) + vals = [val.get() for val in vals] + counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0] + expected = numpy.array([float(trial) / (mx + 1)] * (mx + 1)) + assert _hypothesis.chi_square_test(counts, expected) + + @_condition.repeat(3) + def test_goodness_of_fit_2(self): + mx = 5 + vals = self.generate(mx, (5, 5)).get() + counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0] + expected = numpy.array([float(vals.size) / (mx + 1)] * (mx + 1)) + assert _hypothesis.chi_square_test(counts, expected) + + +@testing.fix_random() +class TestTomaxint(RandomGeneratorTestCase): + + target_method = "tomaxint" + + def 
test_tomaxint_none(self): + x = self.generate() + assert x.shape == () + assert (0 <= x).all() + assert (x <= cupy.iinfo(cupy.int_).max).all() + + def test_tomaxint_int(self): + x = self.generate(3) + assert x.shape == (3,) + assert (0 <= x).all() + assert (x <= cupy.iinfo(cupy.int_).max).all() + + def test_tomaxint_tuple(self): + x = self.generate((2, 3)) + assert x.shape == (2, 3) + assert (0 <= x).all() + assert (x <= cupy.iinfo(cupy.int_).max).all() + + +@testing.parameterize( + {"a": 3, "size": 2, "p": None}, + {"a": 3, "size": 2, "p": [0.3, 0.3, 0.4]}, + {"a": 3, "size": (5, 5), "p": [0.3, 0.3, 0.4]}, + {"a": 3, "size": (5, 5), "p": numpy.array([0.3, 0.3, 0.4])}, + {"a": 3, "size": (), "p": None}, + {"a": numpy.array([0.0, 1.0, 2.0]), "size": 2, "p": [0.3, 0.3, 0.4]}, + {"a": 0, "size": 0, "p": None}, + {"a": numpy.array([]), "size": 0, "p": None}, +) +@testing.fix_random() +class TestChoice1(RandomGeneratorTestCase): + + target_method = "choice" + + def test_dtype_shape(self): + v = self.generate(a=self.a, size=self.size, p=self.p) + if isinstance(self.size, int): + expected_shape = (self.size,) + else: + expected_shape = self.size + if isinstance(self.a, numpy.ndarray): + expected_dtype = "float" + else: + expected_dtype = "int64" + assert v.dtype == expected_dtype + assert v.shape == expected_shape + + @_condition.repeat(3, 10) + def test_bound(self): + vals = self.generate_many(a=self.a, size=self.size, p=self.p, _count=20) + vals = [val.get() for val in vals] + size_ = self.size if isinstance(self.size, tuple) else (self.size,) + if size_ == (0,): + self.skipTest("no bound check for empty `random.choice`") + for val in vals: + assert val.shape == size_ + assert min(val.min() for val in vals) == 0 + assert max(val.max() for val in vals) == 2 + + +@testing.parameterize( + {"a": [0, 1, 2], "size": 2, "p": [0.3, 0.3, 0.4]}, +) +@testing.fix_random() +class TestChoice2(RandomGeneratorTestCase): + + target_method = "choice" + + def test_dtype_shape(self): + v 
= self.generate(a=self.a, size=self.size, p=self.p) + if isinstance(self.size, int): + expected_shape = (self.size,) + else: + expected_shape = self.size + if isinstance(self.a, numpy.ndarray): + expected_dtype = "float" + else: + expected_dtype = "int" + assert v.dtype == expected_dtype + assert v.shape == expected_shape + + @_condition.repeat(3, 10) + def test_bound(self): + vals = self.generate_many(a=self.a, size=self.size, p=self.p, _count=20) + vals = [val.get() for val in vals] + size_ = self.size if isinstance(self.size, tuple) else (self.size,) + for val in vals: + assert val.shape == size_ + assert min(val.min() for val in vals) == 0 + assert max(val.max() for val in vals) == 2 + + +@testing.fix_random() +class TestChoiceChi(RandomGeneratorTestCase): + + target_method = "choice" + + @_condition.repeat_with_success_at_least(10, 9) + def test_goodness_of_fit(self): + trial = 100 + vals = self.generate_many(3, 1, True, [0.3, 0.3, 0.4], _count=trial) + vals = [val.get() for val in vals] + counts = numpy.histogram(vals, bins=numpy.arange(4))[0] + expected = numpy.array([30, 30, 40]) + assert _hypothesis.chi_square_test(counts, expected) + + @_condition.repeat(3, 10) + def test_goodness_of_fit_2(self): + vals = self.generate(3, (5, 20), True, [0.3, 0.3, 0.4]).get() + counts = numpy.histogram(vals, bins=numpy.arange(4))[0] + expected = numpy.array([30, 30, 40]) + assert _hypothesis.chi_square_test(counts, expected) + + +@testing.fix_random() +class TestChoiceMultinomial(unittest.TestCase): + + @_condition.repeat(3, 10) + @testing.for_float_dtypes() + @testing.numpy_cupy_allclose(atol=0.02) + def test_choice_multinomial(self, xp, dtype): + p = xp.array([0.5, 0.25, 0.125, 0.125], dtype) + trial = 10000 + x = xp.random.choice(len(p), trial, p=p) + y = xp.bincount(x).astype("f") / trial + return y + + +@testing.parameterize( + {"a": 3.1, "size": 1, "p": [0.1, 0.1, 0.8]}, + {"a": None, "size": 1, "p": [0.1, 0.1, 0.8]}, + {"a": -3, "size": 1, "p": [0.1, 0.1, 0.8]}, + 
{"a": [[0, 1], [2, 3]], "size": 1, "p": [[0.1, 0.2], [0.3, 0.4]]}, + {"a": [[0, 1], [2, 3]], "size": 1, "p": [0.3, 0.7]}, + {"a": [], "size": 1, "p": [0.1, 0.1, 0.8]}, + {"a": 4, "size": 1, "p": [[0.1, 0.2], [0.3, 0.4]]}, + {"a": 2, "size": 1, "p": [0.1, 0.1, 0.8]}, + {"a": 3, "size": 1, "p": [-0.1, 0.3, 0.8]}, + {"a": 3, "size": 1, "p": [0.1, 0.1, 0.7]}, +) +@testing.fix_random() +class TestChoiceFailure(unittest.TestCase): + + def setUp(self): + self.rs = _generator.RandomState(seed=testing.generate_seed()) + + def test_choice_invalid_value(self): + with self.assertRaises(ValueError): + self.rs.choice(a=self.a, size=self.size, p=self.p) + + +@testing.parameterize( + {"a": 5, "size": 2}, + {"a": 5, "size": (2, 2)}, + {"a": 5, "size": ()}, + {"a": numpy.array([0.0, 2.0, 4.0]), "size": 2}, +) +@testing.fix_random() +class TestChoiceReplaceFalse(RandomGeneratorTestCase): + + target_method = "choice" + + def test_dtype_shape(self): + v = self.generate(a=self.a, size=self.size, replace=False) + if isinstance(self.size, int): + expected_shape = (self.size,) + else: + expected_shape = self.size + if isinstance(self.a, numpy.ndarray): + expected_dtype = "float" + else: + expected_dtype = "int" + assert v.dtype == expected_dtype + assert v.shape == expected_shape + + @_condition.repeat(3, 10) + def test_bound(self): + val = self.generate(a=self.a, size=self.size, replace=False).get() + size = self.size if isinstance(self.size, tuple) else (self.size,) + assert val.shape == size + assert (0 <= val).all() + assert (val < 5).all() + val = numpy.asarray(val) + assert numpy.unique(val).size == val.size + + +@testing.fix_random() +class TestGumbel(RandomGeneratorTestCase): + + target_method = "gumbel" + + def test_gumbel_1(self): + self.generate() + + def test_gumbel_2(self): + self.generate(0.0, 1.0, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_gumbel_ks_1(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + 
@testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_gumbel_ks_2(self, dtype): + self.check_ks(0.05)(2.3, 4.5, size=2000, dtype=dtype) + + +@testing.fix_random() +class TestRandint(RandomGeneratorTestCase): + # TODO(niboshi): + # Test soundness of distribution. + # Currently only reprocibility is checked. + + target_method = "randint" + + def test_randint_1(self): + self.generate(3) + + def test_randint_2(self): + self.generate(3, 4, size=(3, 2)) + + def test_randint_empty1(self): + self.generate(3, 10, size=0) + + def test_randint_empty2(self): + self.generate(3, size=(4, 0, 5)) + + def test_randint_overflow(self): + self.generate(numpy.int8(-100), numpy.int8(100)) + + def test_randint_float1(self): + self.generate(-1.2, 3.4, 5) + + def test_randint_float2(self): + self.generate(6.7, size=(2, 3)) + + def test_randint_int64_1(self): + self.generate(2**34, 2**40, 3, dtype="q") + + def test_randint_array(self): + self.generate([[[-1], [0]], [[-2], [1]], [[3], [4]]], [[10, 11, 12]]) + + +@testing.fix_random() +class TestUniform(RandomGeneratorTestCase): + + target_method = "uniform" + + def test_uniform_1(self): + self.generate() + + def test_uniform_2(self): + self.generate(-4.2, 2.4, size=(3, 2)) + + def test_uniform_broadcast(self): + self.generate([[2, 3]], [4]) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_uniform_ks_1(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_uniform_ks_2(self, dtype): + self.check_ks(0.05)(-4.2, 2.4, size=2000, dtype=dtype) + + +@testing.parameterize( + {"mu": 0.0, "kappa": 1.0}, + {"mu": 3.0, "kappa": 3.0}, + {"mu": 3.0, "kappa": 1.0}, +) +@testing.fix_random() +class TestVonmises(RandomGeneratorTestCase): + + target_method = "vonmises" + + def test_vonmises(self): + self.generate(mu=self.mu, kappa=self.kappa, size=(3, 2)) + + @testing.for_dtypes("fd") + 
@_condition.repeat_with_success_at_least(10, 3) + def test_vonmises_ks(self, dtype): + self.check_ks(0.05)(self.mu, self.kappa, size=2000, dtype=dtype) + + +@testing.parameterize( + {"mean": 1.0, "scale": 3.0}, + {"mean": 3.0, "scale": 3.0}, + {"mean": 3.0, "scale": 1.0}, +) +@testing.fix_random() +class TestWald(RandomGeneratorTestCase): + + target_method = "wald" + + def test_wald(self): + self.generate(mean=self.mean, scale=self.scale, size=(3, 2)) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_wald_ks(self, dtype): + self.check_ks(0.05)(self.mean, self.scale, size=2000, dtype=dtype) + + +@testing.parameterize( + {"a": 0.5}, + {"a": 1.0}, + {"a": 3.0}, + {"a": numpy.inf}, +) +@testing.fix_random() +class TestWeibull(RandomGeneratorTestCase): + + target_method = "weibull" + + def test_weibull(self): + self.generate(a=self.a, size=(3, 2)) + + def test_weibull_size_none(self): + self.generate([[0.5, 1.0, 3.0]], size=None) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_weibull_ks(self, dtype): + self.check_ks(0.05)(a=self.a, size=2000, dtype=dtype) + + +@testing.parameterize( + {"a": 2.0}, +) +@testing.fix_random() +class TestZipf(RandomGeneratorTestCase): + + target_method = "zipf" + + def test_zipf(self): + self.generate(a=self.a, size=(3, 2)) + + # TODO(kataoka): add distribution test + + +@testing.parameterize( + {"a": 3, "size": 5}, + {"a": [1, 2, 3], "size": 5}, +) +@testing.fix_random() +class TestChoiceReplaceFalseFailure(unittest.TestCase): + + def test_choice_invalid_value(self): + for xp in (numpy, cupy): + rs = xp.random.RandomState(seed=testing.generate_seed()) + with pytest.raises(ValueError): + rs.choice(a=self.a, size=self.size, replace=False) + + +class TestResetStates(unittest.TestCase): + + def test_reset_states(self): + _generator._random_states = "dummy" + _generator.reset_states() + assert {} == _generator._random_states + + +class 
TestGetRandomState(unittest.TestCase): + + def setUp(self): + self.device_id = cuda.Device().id + self.rs_tmp = _generator._random_states + + def tearDown(self, *args): + _generator._random_states = self.rs_tmp + + def test_get_random_state_initialize(self): + _generator._random_states = {} + rs = _generator.get_random_state() + assert _generator._random_states[self.device_id] == rs + + def test_get_random_state_memoized(self): + _generator._random_states = { + self.device_id: "expected", + self.device_id + 1: "dummy", + } + rs = _generator.get_random_state() + assert "expected" == _generator._random_states[self.device_id] + assert "dummy" == _generator._random_states[self.device_id + 1] + assert "expected" == rs + + +class TestSetRandomState(unittest.TestCase): + + def setUp(self): + self.rs_tmp = _generator._random_states + + def tearDown(self, *args): + _generator._random_states = self.rs_tmp + + def test_set_random_state(self): + rs = _generator.RandomState() + _generator.set_random_state(rs) + assert _generator.get_random_state() is rs + + def test_set_random_state_call_multiple_times(self): + _generator.set_random_state(_generator.RandomState()) + rs = _generator.RandomState() + _generator.set_random_state(rs) + assert _generator.get_random_state() is rs + + +@testing.fix_random() +class TestStandardExponential( + common_distributions.StandardExponential, RandomGeneratorTestCase +): + pass + + +@testing.parameterize( + {"left": -1.0, "mode": 0.0, "right": 2.0}, +) +@testing.fix_random() +class TestTriangular(RandomGeneratorTestCase): + + target_method = "triangular" + + def test_triangular(self): + self.generate( + left=self.left, mode=self.mode, right=self.right, size=(3, 2) + ) + + +class TestRandomStateThreadSafe(unittest.TestCase): + + def setUp(self): + cupy.random.reset_states() + + def test_get_random_state_thread_safe(self): + def _f(func, args=()): + cupy.cuda.Device().use() + func(*args) + + seed = 10 + threads = [ + threading.Thread(target=_f, 
args=(cupy.random.seed, (seed,))), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + threading.Thread(target=_f, args=(cupy.random.get_random_state,)), + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + actual = cupy.random.uniform() + cupy.random.seed(seed) + expected = cupy.random.uniform() + assert actual == expected + + def test_set_random_state_thread_safe(self): + def _f(func, args=()): + cupy.cuda.Device().use() + func(*args) + + rs = cupy.random.RandomState() + threads = [ + threading.Thread( + target=_f, args=(cupy.random.set_random_state, (rs,)) + ), + threading.Thread( + target=_f, args=(cupy.random.set_random_state, (rs,)) + ), + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + assert cupy.random.get_random_state() is rs + + +class TestGetRandomState2(unittest.TestCase): + + def setUp(self): + self.rs_dict = _generator._random_states + _generator._random_states = {} + self.cupy_seed = os.getenv("CUPY_SEED") + + def tearDown(self, *args): + _generator._random_states = self.rs_dict + if self.cupy_seed is None: + os.environ.pop("CUPY_SEED", None) + else: + os.environ["CUPY_SEED"] = self.cupy_seed + + def test_get_random_state_no_cupy(self): + os.environ.pop("CUPY_SEED", None) + rvs0 = self._get_rvs_reset() + rvs1 = self._get_rvs_reset() + + self._check_different(rvs0, rvs1) + + def test_get_random_state_with_cupy(self): + rvs0 = self._get_rvs(_generator.RandomState(6)) + + os.environ["CUPY_SEED"] = "6" + rvs1 = self._get_rvs_reset() + + self._check_same(rvs0, rvs1) + + def _get_rvs(self, rs): + rvu = rs.rand(4) + rvn = rs.randn(4) + return rvu, rvn + + def _get_rvs_reset(self): + _generator.reset_states() + return 
self._get_rvs(_generator.get_random_state()) + + def _check_same(self, rvs0, rvs1): + for rv0, rv1 in zip(rvs0, rvs1): + testing.assert_array_equal(rv0, rv1) + + def _check_different(self, rvs0, rvs1): + for rv0, rv1 in zip(rvs0, rvs1): + for r0, r1 in zip(rv0, rv1): + assert r0 != r1 + + +class TestCheckAndGetDtype(unittest.TestCase): + + @testing.for_float_dtypes(no_float16=True) + def test_float32_64_type(self, dtype): + assert _generator._check_and_get_dtype(dtype) == numpy.dtype(dtype) + + def test_float16(self): + with self.assertRaises(TypeError): + _generator._check_and_get_dtype(numpy.float16) + + @testing.for_int_dtypes() + def test_int_type(self, dtype): + with self.assertRaises(TypeError): + _generator._check_and_get_dtype(dtype) diff --git a/dpnp/tests/third_party/cupy/random_tests/test_generator_api.py b/dpnp/tests/third_party/cupy/random_tests/test_generator_api.py new file mode 100644 index 000000000000..74c689ce664a --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_generator_api.py @@ -0,0 +1,338 @@ +import threading +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp import random +from dpnp.tests.third_party.cupy import testing +from dpnp.tests.third_party.cupy.testing import _condition + +from . 
import common_distributions + +pytest.skip("random.Generator() is not supported yet", allow_module_level=True) + + +class GeneratorTestCase(common_distributions.BaseGeneratorTestCase): + + target_method = None + + def get_rng(self, xp, seed): + if xp is cupy: + return cupy.random.Generator( + random._bit_generator.Philox4x3210(seed=seed) + ) + else: + return numpy.random.Generator(numpy.random.MT19937(seed)) + + def set_rng_seed(self, seed): + self.rng.bit_generator = random._bit_generator.Philox4x3210(seed=seed) + + +class InvalidOutsMixin: + + def invalid_dtype_out(self, **kwargs): + out = cupy.zeros((3, 2), dtype=cupy.float32) + with pytest.raises(TypeError): + self.generate(size=(3, 2), out=out, **kwargs) + + def invalid_contiguity(self, **kwargs): + out = cupy.zeros((4, 6), dtype=cupy.float64)[0:3:, 0:2:] + with pytest.raises(ValueError): + self.generate(size=(3, 2), out=out, **kwargs) + + def invalid_shape(self, **kwargs): + out = cupy.zeros((3, 3), dtype=cupy.float64) + with pytest.raises(ValueError): + self.generate(size=(3, 2), out=out, **kwargs) + + def test_invalid_dtype_out(self): + self.invalid_dtype_out() + + def test_invalid_contiguity(self): + self.invalid_contiguity() + + def test_invalid_shape(self): + self.invalid_shape() + + +@testing.parameterize(*common_distributions.uniform_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestUniform(common_distributions.Uniform, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.exponential_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestExponential(common_distributions.Exponential, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.poisson_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestPoisson(common_distributions.Poisson, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.binomial_params) +@testing.with_requires("numpy>=1.17.0") 
+@testing.fix_random() +class TestBinomial(common_distributions.Binomial, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.beta_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestBeta(common_distributions.Beta, GeneratorTestCase): + pass + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestStandardExponential( + InvalidOutsMixin, + common_distributions.StandardExponential, + GeneratorTestCase, +): + pass + + +@testing.parameterize(*common_distributions.gamma_params) +@testing.fix_random() +class TestGamma( + common_distributions.Gamma, + GeneratorTestCase, +): + pass + + +@testing.parameterize(*common_distributions.standard_gamma_params) +@testing.fix_random() +class TestStandardGamma( + common_distributions.StandardGamma, + GeneratorTestCase, +): + pass + + +@testing.fix_random() +class TestStandardGammaInvalid(InvalidOutsMixin, GeneratorTestCase): + + target_method = "standard_gamma" + + def test_invalid_dtype_out(self): + self.invalid_dtype_out(shape=1.0) + + def test_invalid_contiguity(self): + self.invalid_contiguity(shape=1.0) + + out = cupy.zeros((4, 6), order="F", dtype=cupy.float64) + with pytest.raises(ValueError): + self.generate(size=(4, 6), out=out, shape=1.0) + + def test_invalid_shape(self): + self.invalid_shape(shape=1.0) + + def test_invalid_dtypes(self): + for dtype in "bhiqleFD": + with pytest.raises(TypeError): + self.generate(size=(3, 2), shape=1.0, dtype=dtype) + + +@testing.fix_random() +class TestStandardGammaEmpty(GeneratorTestCase): + + target_method = "standard_gamma" + + def test_empty_shape(self): + y = self.generate(shape=cupy.empty((1, 0))) + assert y.shape == (1, 0) + + def test_empty_size(self): + y = self.generate(1.0, size=(1, 0)) + assert y.shape == (1, 0) + + def test_empty_out(self): + out = cupy.empty((1, 0)) + y = self.generate(cupy.empty((1, 0)), out=out) + assert y is out + assert y.shape == (1, 0) + + 
+@testing.with_requires("numpy>=1.17.0") +@testing.parameterize(*common_distributions.standard_normal_params) +@testing.fix_random() +class TestStandardNormal( + common_distributions.StandardNormal, GeneratorTestCase +): + pass + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestStandardNormalInvalid(InvalidOutsMixin, GeneratorTestCase): + + target_method = "standard_normal" + + def test_invalid_dtypes(self): + for dtype in "bhiqleFD": + with pytest.raises(TypeError): + self.generate(size=(3, 2), dtype=dtype) + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestIntegers(GeneratorTestCase): + target_method = "integers" + + def test_integers_1(self): + self.generate(3) + + def test_integers_2(self): + self.generate(3, 4, size=(3, 2)) + + def test_integers_empty1(self): + self.generate(3, 10, size=0) + + def test_integers_empty2(self): + self.generate(3, size=(4, 0, 5)) + + def test_integers_overflow(self): + self.generate(numpy.int8(-100), numpy.int8(100)) + + def test_integers_float1(self): + self.generate(-1.2, 3.4, 5) + + def test_integers_float2(self): + self.generate(6.7, size=(2, 3)) + + def test_integers_int64_1(self): + self.generate(2**34, 2**40, 3) + + @_condition.repeat_with_success_at_least(10, 3) + def test_integers_ks(self): + self.check_ks(0.05)(low=100, high=1000, size=2000) + + @_condition.repeat_with_success_at_least(10, 3) + def test_integers_ks_low(self): + self.check_ks(0.05)(low=100, size=2000) + + @_condition.repeat_with_success_at_least(10, 3) + def test_integers_ks_large(self): + self.check_ks(0.05)(low=2**34, high=2**40, size=2000) + + @_condition.repeat_with_success_at_least(10, 3) + def test_integers_ks_large2(self): + self.check_ks(0.05)(2**40, size=2000) + + +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestRandom(InvalidOutsMixin, GeneratorTestCase): + # TODO(niboshi): + # Test soundness of distribution. + # Currently only reprocibility is checked. 
+ + target_method = "random" + + def test_random(self): + self.generate(3) + + @testing.for_dtypes("fd") + @_condition.repeat_with_success_at_least(10, 3) + def test_random_ks(self, dtype): + self.check_ks(0.05)(size=2000, dtype=dtype) + + +@testing.parameterize(*common_distributions.geometric_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestGeometric(common_distributions.Geometric, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.hypergeometric_params) +@testing.with_requires("numpy>=1.17.0") +@testing.fix_random() +class TestHypergeometric( + common_distributions.Hypergeometric, GeneratorTestCase +): + pass + + +@testing.parameterize(*common_distributions.power_params) +@testing.fix_random() +class TestPower(common_distributions.Power, GeneratorTestCase): + pass + + +@testing.with_requires("numpy>=1.17.0") +@pytest.mark.skipif( + cupy.cuda.runtime.is_hip + and (int(str(cupy.cuda.runtime.runtimeGetVersion())[:3]) < 403), + reason="HIP<4.3 not supported ", +) +class TestRandomStateThreadSafe(unittest.TestCase): + + def test_default_rng_thread_safe(self): + def _f(func, args=()): + cupy.cuda.Device().use() + func(*args) + + seed = 10 + threads = [ + threading.Thread( + target=_f, args=(cupy.random.default_rng, (seed,)) + ), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + threading.Thread(target=_f, args=(cupy.random.default_rng,)), + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + actual = cupy.random.default_rng(seed).standard_exponential() + expected = cupy.random.default_rng(seed).standard_exponential() + assert actual == expected + + +@testing.parameterize(*common_distributions.logseries_params)
+@testing.fix_random() +class TestLogseries(common_distributions.Logseries, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.chisquare_params) +@testing.fix_random() +class TestChisquare(common_distributions.Chisquare, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.f_params) +@testing.fix_random() +class TestF(common_distributions.F, GeneratorTestCase): + pass + + +@testing.parameterize(*common_distributions.dirichlet_params) +@testing.fix_random() +class TestDrichlet(common_distributions.Dirichlet, GeneratorTestCase): + pass + + +@testing.slow +class TestLarge: + def test_large(self): + gen = random.Generator(random.XORWOW(1234)) + gen.random(2**31 + 1, dtype=cupy.int8) diff --git a/dpnp/tests/third_party/cupy/random_tests/test_init.py b/dpnp/tests/third_party/cupy/random_tests/test_init.py new file mode 100644 index 000000000000..1f45bfd4d868 --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_init.py @@ -0,0 +1,9 @@ +import pytest + +import dpnp as cupy + + +@pytest.mark.usefixtures("allow_fall_back_on_numpy") +def test_bytes(): + out = cupy.random.bytes(10) + assert isinstance(out, bytes) diff --git a/dpnp/tests/third_party/cupy/random_tests/test_permutations.py b/dpnp/tests/third_party/cupy/random_tests/test_permutations.py new file mode 100644 index 000000000000..eed47320e51b --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_permutations.py @@ -0,0 +1,202 @@ +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.helper import has_support_aspect64 +from dpnp.tests.third_party.cupy import testing +from dpnp.tests.third_party.cupy.testing import _condition + + +@testing.parameterize( + {"seed": None}, + {"seed": 0}, +) +@pytest.mark.skipif(not has_support_aspect64(), reason="fp64 is required") +class TestPermutations(unittest.TestCase): + + def _xp_random(self, xp): + if self.seed is None: + return xp.random + else: + 
pytest.skip("random.RandomState.permutation() is not supported yet") + return xp.random.RandomState(seed=self.seed) + + # Test ranks + + # TODO(niboshi): Fix xfail + @pytest.mark.xfail(reason="Explicit error types required") + def test_permutation_zero_dim(self): + for xp in (numpy, cupy): + xp_random = self._xp_random(xp) + a = testing.shaped_random((), xp) + with pytest.raises(IndexError): + xp_random.permutation(a) + + # Test same values + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_permutation_sort_1dim(self, dtype): + cupy_random = self._xp_random(cupy) + a = cupy.arange(10, dtype=dtype) + b = cupy.copy(a) + c = cupy_random.permutation(a) + testing.assert_allclose(a, b) + testing.assert_allclose(b, cupy.sort(c)) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_permutation_sort_ndim(self, dtype): + cupy_random = self._xp_random(cupy) + a = cupy.arange(15, dtype=dtype).reshape(5, 3) + b = cupy.copy(a) + c = cupy_random.permutation(a) + testing.assert_allclose(a, b) + testing.assert_allclose(b, cupy.sort(c, axis=0)) + + # Test seed + + @testing.for_all_dtypes() + def test_permutation_seed1(self, dtype): + a = testing.shaped_random((10,), cupy, dtype) + b = cupy.copy(a) + + cupy_random = self._xp_random(cupy) + if self.seed is None: + cupy_random.seed(0) + pa = cupy_random.permutation(a) + cupy_random = self._xp_random(cupy) + if self.seed is None: + cupy_random.seed(0) + pb = cupy_random.permutation(b) + + testing.assert_allclose(pa, pb) + + +@pytest.mark.skipif(not has_support_aspect64(), reason="fp64 is required") +class TestShuffle(unittest.TestCase): + + # Test ranks + + @pytest.mark.skip("no proper validation yet") + def test_shuffle_zero_dim(self): + for xp in (numpy, cupy): + a = testing.shaped_random((), xp) + with pytest.raises(TypeError): + xp.random.shuffle(a) + + # Test same values + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def 
test_shuffle_sort_1dim(self, dtype): + a = cupy.arange(10, dtype=dtype) + b = cupy.copy(a) + cupy.random.shuffle(a) + testing.assert_allclose(cupy.sort(a), b) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_shuffle_sort_ndim(self, dtype): + a = cupy.arange(15, dtype=dtype).reshape(5, 3) + b = cupy.copy(a) + cupy.random.shuffle(a) + testing.assert_allclose(cupy.sort(a, axis=0), b) + + # Test seed + + @testing.for_all_dtypes() + def test_shuffle_seed1(self, dtype): + a = testing.shaped_random((10,), cupy, dtype) + b = cupy.copy(a) + cupy.random.seed(0) + cupy.random.shuffle(a) + cupy.random.seed(0) + cupy.random.shuffle(b) + testing.assert_allclose(a, b) + + +@testing.parameterize( + *( + testing.product( + { + # 'num': [0, 1, 100, 1000, 10000, 100000], + "num": [0, 1, 100], # dpnp.random.permutation() is slow + } + ) + ) +) +@pytest.mark.skipif(not has_support_aspect64(), reason="fp64 is required") +class TestPermutationSoundness(unittest.TestCase): + + def setUp(self): + a = cupy.random.permutation(self.num) + self.a = a + + # Test soundness + + @_condition.repeat(3) + def test_permutation_soundness(self): + assert (numpy.sort(self.a) == numpy.arange(self.num)).all() + + +@testing.parameterize( + *( + testing.product( + { + "offset": [0, 17, 34, 51], + "gap": [1, 2, 3, 5, 7], + "mask": [1, 2, 4, 8, 16, 32, 64, 128], + } + ) + ) +) +class TestPermutationRandomness(unittest.TestCase): + + num = 256 + + def setUp(self): + a = cupy.random.permutation(self.num) + self.a = a + self.num_half = int(self.num / 2) + + # Simple bit proportion test + + # This test is to check kind of randomness of permutation. 
+ # An intuition behind this test is that, when you make a sub-array + # by regularly extracting half elements from the permuted array, + # the sub-array should also hold randomness and accordingly + # frequency of appearance of 0 and 1 at each bit position of + # whole elements in the sub-array should become similar + # when elements count of original array is 2^N. + # Note that this is not an established method to check randomness. + # TODO(anaruse): implement randomness check using some established methods. + @_condition.repeat_with_success_at_least(5, 3) + @pytest.mark.skip("no support of index as numpy array") + def test_permutation_randomness(self): + if self.mask > self.num_half: + return + index = numpy.arange(self.num_half) + index = (index * self.gap + self.offset) % self.num + samples = self.a[index] + ret = samples & self.mask > 0 + count = numpy.count_nonzero(ret) # expectation: self.num_half / 2 + if count > self.num_half - count: + count = self.num_half - count + prob_le_count = self._calc_probability(count) + if prob_le_count < 0.001: + raise + + def _calc_probability(self, count): + comb_all = self._comb(self.num, self.num_half) + comb_le_count = 0 + for i in range(count + 1): + tmp = self._comb(self.num_half, i) + comb_i = tmp * tmp + comb_le_count += comb_i + prob = comb_le_count / comb_all + return prob + + def _comb(self, N, k): + val = numpy.float64(1) + for i in range(k): + val *= (N - i) / (k - i) + return val diff --git a/dpnp/tests/third_party/cupy/random_tests/test_random.py b/dpnp/tests/third_party/cupy/random_tests/test_random.py new file mode 100644 index 000000000000..60b1f391dce4 --- /dev/null +++ b/dpnp/tests/third_party/cupy/random_tests/test_random.py @@ -0,0 +1,22 @@ +import unittest + +import pytest + +from dpnp import random +from dpnp.tests.third_party.cupy import testing + + +@pytest.mark.skip("random.get_random_state() is not supported yet") +class TestResetSeed(unittest.TestCase): + + 
@testing.for_float_dtypes(no_float16=True) + def test_reset_seed(self, dtype): + rs = random.get_random_state() + rs.seed(0) + l1 = rs.rand(10, dtype=dtype) + + rs = random.get_random_state() + rs.seed(0) + l2 = rs.rand(10, dtype=dtype) + + testing.assert_array_equal(l1, l2) diff --git a/dpnp/tests/third_party/cupy/random_tests/test_sample.py b/dpnp/tests/third_party/cupy/random_tests/test_sample.py index 8ce99ca4fdf5..703385c37690 100644 --- a/dpnp/tests/third_party/cupy/random_tests/test_sample.py +++ b/dpnp/tests/third_party/cupy/random_tests/test_sample.py @@ -11,13 +11,14 @@ class TestRandint(unittest.TestCase): + def test_lo_hi_reversed(self): with self.assertRaises(ValueError): random.randint(100, 1) def test_lo_hi_equal(self): with self.assertRaises(ValueError): - random.randint(3, 3, size=3) + random.randint(3, 3, size=0) with self.assertRaises(ValueError): # int(-0.2) is not less than int(0.3) @@ -25,49 +26,46 @@ def test_lo_hi_equal(self): def test_lo_hi_nonrandom(self): a = random.randint(-0.9, 1.1, size=3) - numpy.testing.assert_array_equal(a, cupy.full((3,), 0)) + testing.assert_array_equal(a, cupy.full((3,), 0)) a = random.randint(-1.1, -0.9, size=(2, 2)) - numpy.testing.assert_array_equal(a, cupy.full((2, 2), -1)) + testing.assert_array_equal(a, cupy.full((2, 2), -1)) def test_zero_sizes(self): a = random.randint(10, size=(0,)) - numpy.testing.assert_array_equal(a, cupy.array(())) + testing.assert_array_equal(a, cupy.array(())) a = random.randint(10, size=0) - numpy.testing.assert_array_equal(a, cupy.array(())) + testing.assert_array_equal(a, cupy.array(())) @testing.fix_random() class TestRandint2(unittest.TestCase): - @pytest.mark.usefixtures("allow_fall_back_on_numpy") + @_condition.repeat(3, 10) def test_bound_1(self): - vals = [random.randint(0, 10, (2, 3)) for _ in range(10)] + vals = [random.randint(0, 10, (2, 3)) for _ in range(20)] for val in vals: - self.assertEqual(val.shape, (2, 3)) - self.assertEqual(min(_.min() for _ in vals), 0) - 
self.assertEqual(max(_.max() for _ in vals), 9) + assert val.shape == (2, 3) + assert min(_.min() for _ in vals) == 0 + assert max(_.max() for _ in vals) == 9 - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @_condition.repeat(3, 10) def test_bound_2(self): vals = [random.randint(0, 2) for _ in range(20)] for val in vals: - self.assertEqual(val.shape, ()) - self.assertEqual(min(_.min() for _ in vals), 0) - self.assertEqual(max(_.max() for _ in vals), 1) + assert val.shape == () + assert min(vals) == 0 + assert max(vals) == 1 - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @_condition.repeat(3, 10) def test_bound_overflow(self): # 100 - (-100) exceeds the range of int8 val = random.randint(numpy.int8(-100), numpy.int8(100), size=20) - self.assertEqual(val.shape, (20,)) - self.assertGreaterEqual(val.min(), -100) - self.assertLess(val.max(), 100) + assert val.shape == (20,) + assert val.min() >= -100 + assert val.max() < 100 - @pytest.mark.usefixtures("allow_fall_back_on_numpy") @_condition.repeat(3, 10) def test_bound_float1(self): # generate floats s.t. 
int(low) < int(high) @@ -76,26 +74,25 @@ def test_bound_float1(self): high += 1 vals = [random.randint(low, high, (2, 3)) for _ in range(10)] for val in vals: - self.assertEqual(val.shape, (2, 3)) - self.assertEqual(min(_.min() for _ in vals), int(low)) - self.assertEqual(max(_.max() for _ in vals), int(high) - 1) + assert val.shape == (2, 3) + assert min(_.min() for _ in vals) == int(low) + assert max(_.max() for _ in vals) == int(high) - 1 - @pytest.mark.usefixtures("allow_fall_back_on_numpy") def test_bound_float2(self): vals = [random.randint(-1.0, 1.0, (2, 3)) for _ in range(10)] for val in vals: - self.assertEqual(val.shape, (2, 3)) - self.assertEqual(min(_.min() for _ in vals), -1) - self.assertEqual(max(_.max() for _ in vals), 0) + assert val.shape == (2, 3) + assert min(_.min() for _ in vals) == -1 + assert max(_.max() for _ in vals) == 0 @_condition.repeat(3, 10) def test_goodness_of_fit(self): mx = 5 trial = 100 - vals = [numpy.random.randint(mx) for _ in range(trial)] + vals = [random.randint(mx) for _ in range(trial)] counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] expected = numpy.array([float(trial) / mx] * mx) - self.assertTrue(_hypothesis.chi_square_test(counts, expected)) + assert _hypothesis.chi_square_test(counts, expected) @_condition.repeat(3, 10) def test_goodness_of_fit_2(self): @@ -103,43 +100,52 @@ def test_goodness_of_fit_2(self): vals = random.randint(mx, size=(5, 20)) counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] expected = numpy.array([float(vals.size) / mx] * mx) - self.assertTrue(_hypothesis.chi_square_test(counts, expected)) + assert _hypothesis.chi_square_test(counts, expected) +@pytest.mark.skip("high=(max+1) is not supported") class TestRandintDtype(unittest.TestCase): - # numpy.int8, numpy.uint8, numpy.int16, numpy.uint16, numpy.int32]) - @testing.for_dtypes([numpy.int32]) + + @testing.with_requires("numpy>=2.0") + @testing.for_dtypes( + [numpy.int8, numpy.uint8, numpy.int16, numpy.uint16, 
numpy.int32] + ) def test_dtype(self, dtype): size = (1000,) low = numpy.iinfo(dtype).min - high = numpy.iinfo(dtype).max + high = numpy.iinfo(dtype).max + 1 x = random.randint(low, high, size, dtype) - self.assertLessEqual(low, min(x)) - self.assertLessEqual(max(x), high) + assert low <= min(x) + assert max(x) <= high - # @testing.for_int_dtypes(no_bool=True) + @testing.for_int_dtypes(no_bool=True) @testing.for_dtypes([numpy.int32]) def test_dtype2(self, dtype): dtype = numpy.dtype(dtype) + # randint does not support 64 bit integers + if dtype in (numpy.int64, numpy.uint64): + return + iinfo = numpy.iinfo(dtype) size = (10000,) - x = random.randint(iinfo.min, iinfo.max, size, dtype) - self.assertEqual(x.dtype, dtype) - self.assertLessEqual(iinfo.min, min(x)) - self.assertLessEqual(max(x), iinfo.max) + x = random.randint(iinfo.min, iinfo.max + 1, size, dtype).get() + assert x.dtype == dtype + assert iinfo.min <= min(x) + assert max(x) <= iinfo.max # Lower bound check - with self.assertRaises(OverflowError): + with self.assertRaises(ValueError): random.randint(iinfo.min - 1, iinfo.min + 10, size, dtype) # Upper bound check - with self.assertRaises(OverflowError): + with self.assertRaises(ValueError): random.randint(iinfo.max - 10, iinfo.max + 2, size, dtype) class TestRandomIntegers(unittest.TestCase): + def test_normal(self): with mock.patch("dpnp.random.RandomState.randint") as m: random.random_integers(3, 5) @@ -164,50 +170,53 @@ def test_size_is_not_none(self): @testing.fix_random() class TestRandomIntegers2(unittest.TestCase): + @_condition.repeat(3, 10) def test_bound_1(self): - vals = [random.random_integers(0, 10, (2, 3)).get() for _ in range(10)] + vals = [random.random_integers(0, 10, (2, 3)) for _ in range(10)] for val in vals: - self.assertEqual(val.shape, (2, 3)) - self.assertEqual(min(_.min() for _ in vals), 0) - self.assertEqual(max(_.max() for _ in vals), 10) + assert val.shape == (2, 3) + assert min(_.min() for _ in vals) == 0 + assert max(_.max() 
for _ in vals) == 10 @_condition.repeat(3, 10) def test_bound_2(self): - vals = [random.random_integers(0, 2).get() for _ in range(20)] + vals = [random.random_integers(0, 2) for _ in range(20)] for val in vals: - self.assertEqual(val.shape, ()) - self.assertEqual(min(vals), 0) - self.assertEqual(max(vals), 2) + assert val.shape == () + assert min(vals) == 0 + assert max(vals) == 2 @_condition.repeat(3, 10) def test_goodness_of_fit(self): mx = 5 trial = 100 - vals = [random.randint(0, mx).get() for _ in range(trial)] + vals = [random.randint(0, mx) for _ in range(trial)] counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] expected = numpy.array([float(trial) / mx] * mx) - self.assertTrue(_hypothesis.chi_square_test(counts, expected)) + assert _hypothesis.chi_square_test(counts, expected) @_condition.repeat(3, 10) def test_goodness_of_fit_2(self): mx = 5 - vals = random.randint(0, mx, (5, 20)).get() + vals = random.randint(0, mx, (5, 20)) counts = numpy.histogram(vals, bins=numpy.arange(mx + 1))[0] expected = numpy.array([float(vals.size) / mx] * mx) - self.assertTrue(_hypothesis.chi_square_test(counts, expected)) + assert _hypothesis.chi_square_test(counts, expected) +@pytest.mark.skip("random.choice() is not supported yet") class TestChoice(unittest.TestCase): + def setUp(self): - self.rs_tmp = random.generator._random_states + self.rs_tmp = random._generator._random_states device_id = cuda.Device().id self.m = mock.Mock() self.m.choice.return_value = 0 - random.generator._random_states = {device_id: self.m} + random._generator._random_states = {device_id: self.m} def tearDown(self): - random.generator._random_states = self.rs_tmp + random._generator._random_states = self.rs_tmp def test_size_and_replace_and_p_are_none(self): random.choice(3) @@ -243,10 +252,11 @@ def test_no_none(self): class TestRandomSample(unittest.TestCase): + def test_rand(self): - # no keyword argument 'dtype' in dpnp - with self.assertRaises(TypeError): - random.rand(1, 2, 3, 
dtype=numpy.float32) + with mock.patch("dpnp.random.RandomState.random_sample") as m: + random.rand(1, 2, 3) + m.assert_called_once_with(size=(1, 2, 3), usm_type="device") def test_rand_default_dtype(self): with mock.patch("dpnp.random.RandomState.random_sample") as m: @@ -280,12 +290,14 @@ def test_randn_invalid_argument(self): {"size": (1, 0)}, ) @testing.fix_random() +@pytest.mark.skip("random.multinomial() is not fully supported") class TestMultinomial(unittest.TestCase): + @_condition.repeat(3, 10) @testing.for_float_dtypes() @testing.numpy_cupy_allclose(rtol=0.05) def test_multinomial(self, xp, dtype): pvals = xp.array([0.2, 0.3, 0.5], dtype) x = xp.random.multinomial(100000, pvals, self.size) - self.assertEqual(x.dtype, "l") + assert x.dtype.kind == "l" return x / 100000 diff --git a/dpnp/tests/third_party/cupy/sorting_tests/test_count.py b/dpnp/tests/third_party/cupy/sorting_tests/test_count.py index b4e8be11fc77..ede4192762a3 100644 --- a/dpnp/tests/third_party/cupy/sorting_tests/test_count.py +++ b/dpnp/tests/third_party/cupy/sorting_tests/test_count.py @@ -7,6 +7,7 @@ class TestCount(unittest.TestCase): + @testing.for_all_dtypes() def test_count_nonzero(self, dtype): def func(xp): diff --git a/dpnp/tests/third_party/cupy/sorting_tests/test_search.py b/dpnp/tests/third_party/cupy/sorting_tests/test_search.py index b2b201bd7b6a..cbbc2efac463 100644 --- a/dpnp/tests/third_party/cupy/sorting_tests/test_search.py +++ b/dpnp/tests/third_party/cupy/sorting_tests/test_search.py @@ -7,6 +7,7 @@ class TestSearch: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_argmax_all(self, xp, dtype): @@ -167,6 +168,13 @@ def test_argmin_int32_overflow(self): assert a.argmin().item() == 2**32 +# TODO(leofang): remove this once CUDA 9.0 is dropped +def _skip_cuda90(dtype): + ver = cupy.cuda.runtime.runtimeGetVersion() + if dtype == cupy.float16 and ver == 9000: + pytest.skip("CUB does not support fp16 on CUDA 9.0") + + # This class compares 
CUB results against NumPy's # TODO(leofang): test axis after support is added @testing.parameterize( @@ -180,6 +188,7 @@ def test_argmin_int32_overflow(self): ) @pytest.mark.skip("The CUB routine is not enabled") class TestCubReduction: + @pytest.fixture(autouse=True) def setUp(self): self.order, self.axis = self.order_and_axis @@ -200,6 +209,7 @@ def setUp(self): @testing.for_dtypes("bhilBHILefdFD") @testing.numpy_cupy_allclose(rtol=1e-5, contiguous_check=False) def test_cub_argmin(self, xp, dtype): + _skip_cuda90(dtype) a = testing.shaped_random(self.shape, xp, dtype) if self.order == "C": a = xp.ascontiguousarray(a) @@ -220,7 +230,7 @@ def test_cub_argmin(self, xp, dtype): # this is the only function we can mock; the rest is cdef'd func_name = "cupy._core._cub_reduction." func_name += "_SimpleCubReductionKernel_get_cached_function" - # func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function if self.axis is not None and len(self.shape) > 1: times_called = 1 # one pass else: @@ -235,7 +245,7 @@ def test_cub_argmin(self, xp, dtype): @testing.for_dtypes("bhilBHILefdFD") @testing.numpy_cupy_allclose(rtol=1e-5, contiguous_check=False) def test_cub_argmax(self, xp, dtype): - # _skip_cuda90(dtype) + _skip_cuda90(dtype) a = testing.shaped_random(self.shape, xp, dtype) if self.order == "C": a = xp.ascontiguousarray(a) @@ -256,7 +266,7 @@ def test_cub_argmax(self, xp, dtype): # this is the only function we can mock; the rest is cdef'd func_name = "cupy._core._cub_reduction." 
func_name += "_SimpleCubReductionKernel_get_cached_function" - # func = _cub_reduction._SimpleCubReductionKernel_get_cached_function + func = _cub_reduction._SimpleCubReductionKernel_get_cached_function if self.axis is not None and len(self.shape) > 1: times_called = 1 # one pass else: @@ -280,6 +290,7 @@ def test_cub_argmax(self, xp, dtype): ) @pytest.mark.skip("dtype is not supported") class TestArgMinMaxDtype: + @testing.for_dtypes( dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64], name="result_dtype", @@ -304,6 +315,7 @@ def test_argminmax_dtype(self, in_dtype, result_dtype): {"cond_shape": (3, 4), "x_shape": (2, 3, 4), "y_shape": (4,)}, ) class TestWhereTwoArrays: + @testing.for_all_dtypes_combination(names=["cond_type", "x_type", "y_type"]) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_where_two_arrays(self, xp, cond_type, x_type, y_type): @@ -323,6 +335,7 @@ def test_where_two_arrays(self, xp, cond_type, x_type, y_type): {"cond_shape": (3, 4)}, ) class TestWhereCond: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_where_cond(self, xp, dtype): @@ -332,6 +345,7 @@ def test_where_cond(self, xp, dtype): class TestWhereError: + def test_one_argument(self): for xp in (numpy, cupy): cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_) @@ -349,6 +363,7 @@ def test_one_argument(self): _ids=False, # Do not generate ids from randomly generated params ) class TestNonzero: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_nonzero(self, xp, dtype): @@ -360,15 +375,21 @@ def test_nonzero(self, xp, dtype): {"array": numpy.array(0)}, {"array": numpy.array(1)}, ) -@pytest.mark.skip("Only positive rank is supported") @testing.with_requires("numpy>=1.17.0") class TestNonzeroZeroDimension: + + @testing.with_requires("numpy>=2.1") + @testing.for_all_dtypes() + def test_nonzero(self, dtype): + array = cupy.array(self.array, dtype=dtype) + with pytest.raises(ValueError): + cupy.nonzero(array) + 
@testing.for_all_dtypes() @testing.numpy_cupy_array_equal() - def test_nonzero(self, xp, dtype): + def test_nonzero_explicit(self, xp, dtype): array = xp.array(self.array, dtype=dtype) - with testing.assert_warns(DeprecationWarning): - return xp.nonzero(array) + return xp.nonzero(xp.atleast_1d(array)) @testing.parameterize( @@ -382,6 +403,7 @@ def test_nonzero(self, xp, dtype): _ids=False, # Do not generate ids from randomly generated params ) class TestFlatNonzero: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_flatnonzero(self, xp, dtype): @@ -398,6 +420,7 @@ def test_flatnonzero(self, xp, dtype): _ids=False, # Do not generate ids from randomly generated params ) class TestArgwhere: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_argwhere(self, xp, dtype): @@ -411,6 +434,7 @@ def test_argwhere(self, xp, dtype): ) @testing.with_requires("numpy>=1.18") class TestArgwhereZeroDimension: + @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_argwhere(self, xp, dtype): @@ -419,6 +443,7 @@ def test_argwhere(self, xp, dtype): class TestNanArgMin: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmin_all(self, xp, dtype): @@ -509,6 +534,7 @@ def test_nanargmin_zero_size_axis1(self, xp, dtype): class TestNanArgMax: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() def test_nanargmax_all(self, xp, dtype): @@ -620,6 +646,7 @@ def test_nanargmax_zero_size_axis1(self, xp, dtype): ) ) class TestSearchSorted: + @testing.for_all_dtypes(no_bool=True) @testing.numpy_cupy_array_equal() def test_searchsorted(self, xp, dtype): @@ -639,6 +666,7 @@ def test_ndarray_searchsorted(self, xp, dtype): @testing.parameterize({"side": "left"}, {"side": "right"}) class TestSearchSortedNanInf: + @testing.numpy_cupy_array_equal() def test_searchsorted_nanbins(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) @@ -704,6 +732,7 @@ def test_searchsorted_minf(self, xp): 
class TestSearchSortedInvalid: + # Can't test unordered bins due to numpy undefined # behavior for searchsorted @@ -723,6 +752,7 @@ def test_ndarray_searchsorted_ndbins(self): class TestSearchSortedWithSorter: + @testing.numpy_cupy_array_equal() def test_sorter(self, xp): x = testing.shaped_arange((12,), xp, xp.float64) @@ -741,16 +771,16 @@ def test_invalid_sorter(self): def test_nonint_sorter(self): for xp in (numpy, cupy): - dt = cupy.default_float_type() - x = testing.shaped_arange((12,), xp, dt) + x = testing.shaped_arange((12,), xp, xp.float32) bins = xp.array([10, 4, 2, 1, 8]) - sorter = xp.array([], dtype=dt) + sorter = xp.array([], dtype=xp.float32) with pytest.raises((TypeError, ValueError)): xp.searchsorted(bins, x, sorter=sorter) @testing.parameterize({"side": "left"}, {"side": "right"}) class TestNdarraySearchSortedNanInf: + @testing.numpy_cupy_array_equal() def test_searchsorted_nanbins(self, xp): x = testing.shaped_arange((10,), xp, xp.float64) @@ -816,6 +846,7 @@ def test_searchsorted_minf(self, xp): class TestNdarraySearchSortedWithSorter: + @testing.numpy_cupy_array_equal() def test_sorter(self, xp): x = testing.shaped_arange((12,), xp, xp.float64) @@ -834,9 +865,8 @@ def test_invalid_sorter(self): def test_nonint_sorter(self): for xp in (numpy, cupy): - dt = cupy.default_float_type() - x = testing.shaped_arange((12,), xp, dt) + x = testing.shaped_arange((12,), xp, xp.float32) bins = xp.array([10, 4, 2, 1, 8]) - sorter = xp.array([], dtype=dt) + sorter = xp.array([], dtype=xp.float32) with pytest.raises((TypeError, ValueError)): bins.searchsorted(x, sorter=sorter) diff --git a/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py b/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py index 3d4eafa72046..dc619d77f786 100644 --- a/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py +++ b/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py @@ -20,6 +20,7 @@ def get_array_module(*args): class TestSort(unittest.TestCase): + # Test ranks def 
test_sort_zero_dim(self): @@ -68,11 +69,11 @@ def test_sort_contiguous(self, xp): a.sort() return a - @testing.numpy_cupy_array_equal() - def test_sort_non_contiguous(self, xp): - a = testing.shaped_random((10,), xp)[::2] # Non contiguous view - a.sort() - return a + @pytest.mark.skip("non-contiguous array is supported") + def test_sort_non_contiguous(self): + a = testing.shaped_random((10,), cupy)[::2] # Non contiguous view + with self.assertRaises(NotImplementedError): + a.sort() @testing.numpy_cupy_array_equal() def test_external_sort_contiguous(self, xp): @@ -214,6 +215,7 @@ def test_large(self, xp): @pytest.mark.skip("lexsort() is not implemented yet") class TestLexsort(unittest.TestCase): + # Test ranks # TODO(niboshi): Fix xfail @@ -298,12 +300,15 @@ def test_F_order(self, xp): ) ) class TestArgsort(unittest.TestCase): - def argsort(self, a, axis=-1, kind=None): + + def argsort(self, a, axis=-1): if self.external: + # Need to explicitly specify kind="stable" + # numpy uses "quicksort" as default xp = cupy.get_array_module(a) - return xp.argsort(a, axis=axis, kind=kind) + return xp.argsort(a, axis=axis, kind="stable") else: - return a.argsort(axis=axis, kind=kind) + return a.argsort(axis=axis, kind="stable") # Test base cases @@ -319,7 +324,7 @@ def test_argsort_zero_dim(self, xp, dtype): @testing.numpy_cupy_array_equal() def test_argsort_one_dim(self, xp, dtype): a = testing.shaped_random((10,), xp, dtype) - return self.argsort(a, axis=-1, kind="stable") + return self.argsort(a) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() @@ -414,30 +419,8 @@ def test_nan2(self, xp, dtype): return self.argsort(a) -@pytest.mark.skip("msort() is deprecated") -class TestMsort(unittest.TestCase): - # Test base cases - - def test_msort_zero_dim(self): - for xp in (numpy, cupy): - a = testing.shaped_random((), xp) - with pytest.raises(AxisError): - xp.msort(a) - - @testing.for_all_dtypes() - @testing.numpy_cupy_array_equal() - def test_msort_one_dim(self, xp, 
dtype): - a = testing.shaped_random((10,), xp, dtype) - return xp.msort(a) - - @testing.for_all_dtypes() - @testing.numpy_cupy_array_equal() - def test_msort_multi_dim(self, xp, dtype): - a = testing.shaped_random((2, 3), xp, dtype) - return xp.msort(a) - - class TestSort_complex(unittest.TestCase): + def test_sort_complex_zero_dim(self): for xp in (numpy, cupy): a = testing.shaped_random((), xp) @@ -474,6 +457,7 @@ def test_sort_complex_nan(self, xp, dtype): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestPartition(unittest.TestCase): + def partition(self, a, kth, axis=-1): if self.external: xp = cupy.get_array_module(a) @@ -622,6 +606,7 @@ def test_partition_invalid_negative_axis2(self): ) @pytest.mark.skip("not fully supported yet") class TestArgpartition(unittest.TestCase): + def argpartition(self, a, kth, axis=-1): if self.external: xp = cupy.get_array_module(a) diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_correlation.py b/dpnp/tests/third_party/cupy/statistics_tests/test_correlation.py index 2b2f846bb71e..f8d7feb75c73 100644 --- a/dpnp/tests/third_party/cupy/statistics_tests/test_correlation.py +++ b/dpnp/tests/third_party/cupy/statistics_tests/test_correlation.py @@ -1,9 +1,7 @@ -import sys import unittest import numpy import pytest -from dpctl import select_default_device import dpnp as cupy from dpnp.tests.helper import has_support_aspect64 @@ -11,6 +9,7 @@ class TestCorrcoef(unittest.TestCase): + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_corrcoef(self, xp, dtype): @@ -37,10 +36,22 @@ def test_corrcoef_rowvar(self, xp, dtype): y = testing.shaped_arange((2, 3), xp, dtype) return xp.corrcoef(a, y=y, rowvar=False) + @testing.with_requires("numpy>=1.20") + @testing.for_all_dtypes() + @testing.numpy_cupy_allclose(accept_error=True) + def test_corrcoef_dtype(self, xp, dtype): + a = testing.shaped_arange((2, 3), xp, dtype) + y = testing.shaped_arange((2, 3), xp, dtype) + 
try: + res = xp.corrcoef(a, y=y, dtype=dtype) + except ValueError as e: + if xp is cupy: # dpnp raises ValueError(...) + raise TypeError(e) + raise + return res + class TestCov(unittest.TestCase): - # resulting dtype will differ with numpy if no fp64 support by a default device - _has_fp64 = select_default_device().has_aspect_fp64 def generate_input(self, a_shape, y_shape, xp, dtype): a = testing.shaped_arange(a_shape, xp, dtype) @@ -50,7 +61,9 @@ def generate_input(self, a_shape, y_shape, xp, dtype): return a, y @testing.for_all_dtypes() - @testing.numpy_cupy_allclose(type_check=_has_fp64, accept_error=True) + @testing.numpy_cupy_allclose( + type_check=has_support_aspect64(), accept_error=True + ) def check( self, a_shape, @@ -153,6 +166,7 @@ def test_cov_empty(self): ) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestCorrelateShapeCombination(unittest.TestCase): + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-4) def test_correlate(self, xp, dtype): @@ -161,34 +175,36 @@ def test_correlate(self, xp, dtype): return xp.correlate(a, b, mode=self.mode) -@testing.parameterize(*testing.product({"mode": ["valid", "full", "same"]})) +@pytest.mark.parametrize("mode", ["valid", "full", "same"]) @pytest.mark.usefixtures("allow_fall_back_on_numpy") -class TestCorrelate(unittest.TestCase): +class TestCorrelate: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=1e-5) - def test_correlate_non_contiguous(self, xp, dtype): + def test_correlate_non_contiguous(self, xp, dtype, mode): a = testing.shaped_arange((300,), xp, dtype) b = testing.shaped_arange((100,), xp, dtype) - return xp.correlate(a[::200], b[10::70], mode=self.mode) + return xp.correlate(a[::200], b[10::70], mode=mode) @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-4) - def test_correlate_large_non_contiguous(self, xp, dtype): + def test_correlate_large_non_contiguous(self, xp, dtype, mode): a = testing.shaped_arange((10000,), xp, 
dtype) b = testing.shaped_arange((1000,), xp, dtype) - return xp.correlate(a[200::], b[10::700], mode=self.mode) + return xp.correlate(a[200::], b[10::700], mode=mode) @testing.for_all_dtypes_combination(names=["dtype1", "dtype2"]) @testing.numpy_cupy_allclose(rtol=1e-2, type_check=has_support_aspect64()) - def test_correlate_diff_types(self, xp, dtype1, dtype2): + def test_correlate_diff_types(self, xp, dtype1, dtype2, mode): a = testing.shaped_random((200,), xp, dtype1) b = testing.shaped_random((100,), xp, dtype2) - return xp.correlate(a, b, mode=self.mode) + return xp.correlate(a, b, mode=mode) @testing.parameterize(*testing.product({"mode": ["valid", "same", "full"]})) @pytest.mark.usefixtures("allow_fall_back_on_numpy") class TestCorrelateInvalid(unittest.TestCase): + @testing.with_requires("numpy>=1.18") @testing.for_all_dtypes() def test_correlate_empty(self, dtype): diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py b/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py index 88832a070aa4..2f5ff00d9579 100644 --- a/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py +++ b/dpnp/tests/third_party/cupy/statistics_tests/test_histogram.py @@ -42,6 +42,7 @@ def for_all_dtypes_combination_bincount(names): class TestHistogram(unittest.TestCase): + @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(atol=1e-6, type_check=has_support_aspect64()) def test_histogram(self, xp, dtype): @@ -94,7 +95,7 @@ def test_histogram_range_with_density(self, xp, dtype): return h @testing.for_float_dtypes() - @testing.numpy_cupy_allclose(atol=1e-6, type_check=False) + @testing.numpy_cupy_allclose(atol=1e-6, type_check=has_support_aspect64()) def test_histogram_range_with_weights_and_density(self, xp, dtype): a = xp.arange(10, dtype=dtype) + 0.5 w = xp.arange(10, dtype=dtype) + 0.5 @@ -135,7 +136,9 @@ def test_histogram_int_weights_dtype(self, xp, dtype): def test_histogram_float_weights_dtype(self, xp, 
dtype): # Check the type of the returned histogram a = xp.arange(10, dtype=dtype) - h, b = xp.histogram(a, weights=xp.ones(10, dtype=xp.float32)) + h, b = xp.histogram( + a, weights=xp.ones(10, dtype=cupy.default_float_type()) + ) assert xp.issubdtype(h.dtype, xp.floating) return h @@ -355,6 +358,7 @@ def test_bincount_too_small_minlength(self, dtype): ) ) class TestDigitize: + @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_array_equal() def test_digitize(self, xp, dtype): @@ -369,6 +373,7 @@ def test_digitize(self, xp, dtype): @testing.parameterize({"right": True}, {"right": False}) class TestDigitizeNanInf(unittest.TestCase): + @testing.numpy_cupy_array_equal() def test_digitize_nan(self, xp): x = testing.shaped_arange((14,), xp, xp.float32) @@ -438,6 +443,7 @@ def test_searchsorted_minf(self, xp): class TestDigitizeInvalid(unittest.TestCase): + def test_digitize_complex(self): for xp in (numpy, cupy): x = testing.shaped_arange((14,), xp, xp.complex64) @@ -454,6 +460,7 @@ def test_digitize_nd_bins(self): @pytest.mark.skip("histogramdd() is not implemented yet") +# @pytest.mark.skip(reason="XXX: NP2.0: histogramdd dtype") @testing.parameterize( *testing.product( { @@ -473,6 +480,7 @@ def test_digitize_nd_bins(self): ) ) class TestHistogramdd: + @testing.for_all_dtypes(no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(atol=1e-7, rtol=1e-7) def test_histogramdd(self, xp, dtype): @@ -499,6 +507,7 @@ def test_histogramdd(self, xp, dtype): @pytest.mark.skip("histogramdd() is not implemented yet") class TestHistogramddErrors(unittest.TestCase): + def test_histogramdd_invalid_bins(self): for xp in (numpy, cupy): x = testing.shaped_random((16, 2), xp, scale=100) @@ -544,6 +553,7 @@ def test_histogramdd_disallow_arraylike_bins(self): @pytest.mark.skip("histogram2d() is not implemented yet") +# @pytest.mark.skip(reason="XXX: NP2.0: histogram2d dtype") @testing.parameterize( *testing.product( { @@ -556,11 +566,13 @@ def 
test_histogramdd_disallow_arraylike_bins(self): ) ) class TestHistogram2d: + @testing.for_all_dtypes(no_bool=True, no_complex=True) - @testing.numpy_cupy_allclose(atol=1e-7, rtol=1e-7) + @testing.numpy_cupy_allclose(atol=1e-2, rtol=1e-7) def test_histogram2d(self, xp, dtype): x = testing.shaped_random((100,), xp, dtype, scale=100) y = testing.shaped_random((100,), xp, dtype, scale=100) + if self.bins == "array_list": bins = [xp.arange(0, 100, 4), xp.arange(0, 100, 10)] elif self.bins == "array": @@ -584,6 +596,7 @@ def test_histogram2d(self, xp, dtype): @pytest.mark.skip("histogram2d() is not implemented yet") class TestHistogram2dErrors(unittest.TestCase): + def test_histogram2d_disallow_arraylike_bins(self): x = testing.shaped_random((16,), cupy, scale=100) y = testing.shaped_random((16,), cupy, scale=100) diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py b/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py index e10a05a4763b..bf5d37df2fba 100644 --- a/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -1,3 +1,5 @@ +import math + import numpy import pytest from dpctl.tensor._numpy_helper import AxisError @@ -12,6 +14,7 @@ class TestMedian: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_median_noaxis(self, xp, dtype): @@ -61,7 +64,14 @@ def test_median_invalid_axis(self): return xp.median(a, (-a.ndim - 1, 1), keepdims=False) with pytest.raises(AxisError): - return xp.median(a, (0, a.ndim), keepdims=False) + return xp.median( + a, + ( + 0, + a.ndim, + ), + keepdims=False, + ) @testing.for_dtypes("efdFD") @testing.numpy_cupy_allclose() @@ -83,6 +93,7 @@ def test_median_nan(self, xp, dtype): ) ) class TestMedianAxis: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_median_axis_sequence(self, xp, dtype): @@ -140,6 +151,7 @@ def test_nanmedian(self, xp, 
dtype): class TestAverage: + _multiprocess_can_split_ = True @testing.for_all_dtypes() @@ -164,7 +176,7 @@ def test_average_weights(self, xp, dtype): @testing.for_all_dtypes() @testing.numpy_cupy_allclose(rtol=2e-7, type_check=has_support_aspect64()) @pytest.mark.parametrize( - "axis, weights", [(1, False), (None, True), (1, True)] + "axis,weights", [(1, False), (None, True), (1, True)] ) def test_returned(self, xp, dtype, axis, weights): a = testing.shaped_arange((2, 3), xp, dtype) @@ -196,6 +208,7 @@ def test_average_keepdims_noaxis(self, xp, dtype, returned): class TestMeanVar: + @testing.for_all_dtypes() @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_mean_all(self, xp, dtype): @@ -223,7 +236,7 @@ def test_external_mean_axis(self, xp, dtype): @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-06) def test_mean_all_float32_dtype(self, xp, dtype): - a = xp.full((2, 3, 4), 123456789, dtype=dtype) + a = testing.shaped_arange((2, 3, 4), xp, dtype=dtype) return xp.mean(a, dtype=numpy.float32) @testing.for_all_dtypes(no_complex=True) @@ -345,13 +358,14 @@ def test_external_std_axis_ddof(self, xp, dtype): ) ) class TestNanMean: + @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanmean_without_nan(self, xp, dtype): a = testing.shaped_random(self.shape, xp, dtype) return xp.nanmean(a, axis=self.axis, keepdims=self.keepdims) - @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings") + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanmean_with_nan_float(self, xp, dtype): @@ -365,17 +379,13 @@ def test_nanmean_with_nan_float(self, xp, dtype): class TestNanMeanAdditional: - @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings") + + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) - 
@testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) + @testing.numpy_cupy_allclose(rtol=1e-6) def test_nanmean_out(self, xp, dtype): a = testing.shaped_random((10, 20, 30), xp, dtype) - # `numpy.mean` allows ``unsafe`` casting while `dpnp.mean` does not. - # So, output data type cannot be the same as input. - out_dtype = ( - cupy.default_float_type(a.device) if xp == cupy else numpy.float64 - ) - z = xp.zeros((20, 30), dtype=out_dtype) + z = xp.zeros((20, 30), dtype=dtype) if a.dtype.kind not in "biu": a[1, :] = xp.nan @@ -404,7 +414,7 @@ def test_nanmean_float16(self, xp): a[0][0] = xp.nan return xp.nanmean(a) - @pytest.mark.usefixtures("suppress_mean_empty_slice_numpy_warnings") + @ignore_runtime_warnings @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanmean_all_nan(self, xp): a = xp.zeros((3, 4)) @@ -423,7 +433,8 @@ def test_nanmean_all_nan(self, xp): ) ) class TestNanVarStd: - @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar(self, xp, dtype): @@ -434,7 +445,7 @@ def test_nanvar(self, xp, dtype): a, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims ) - @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanstd(self, xp, dtype): @@ -447,7 +458,8 @@ def test_nanstd(self, xp, dtype): class TestNanVarStdAdditional: - @pytest.mark.usefixtures("suppress_dof_numpy_warnings") + + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanvar_out(self, xp, dtype): @@ -481,7 +493,7 @@ def test_nanvar_float16(self, xp): a[0][0] = xp.nan return xp.nanvar(a, axis=0) - 
@pytest.mark.usefixtures("suppress_dof_numpy_warnings") + @ignore_runtime_warnings @testing.for_all_dtypes(no_float16=True) @testing.numpy_cupy_allclose(rtol=1e-6, type_check=has_support_aspect64()) def test_nanstd_out(self, xp, dtype): @@ -537,6 +549,7 @@ def test_nanstd_float16(self, xp): "suppress_mean_empty_slice_numpy_warnings", ) class TestProductZeroLength: + @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose(type_check=has_support_aspect64()) def test_external_mean_zero_len(self, xp, dtype): diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_order.py b/dpnp/tests/third_party/cupy/statistics_tests/test_order.py index 7bffc360c01a..2dbbb2ace738 100644 --- a/dpnp/tests/third_party/cupy/statistics_tests/test_order.py +++ b/dpnp/tests/third_party/cupy/statistics_tests/test_order.py @@ -7,19 +7,19 @@ from dpnp.tests.third_party.cupy import testing _all_methods = ( - # "inverted_cdf", # TODO(takagi) Not implemented - # "averaged_inverted_cdf", # TODO(takagi) Not implemented - # "closest_observation", # TODO(takagi) Not implemented - # "interpolated_inverted_cdf", # TODO(takagi) Not implemented - # "hazen", # TODO(takagi) Not implemented - # "weibull", # TODO(takagi) Not implemented + # 'inverted_cdf', # TODO(takagi) Not implemented + # 'averaged_inverted_cdf', # TODO(takagi) Not implemented + # 'closest_observation', # TODO(takagi) Not implemented + # 'interpolated_inverted_cdf', # TODO(takagi) Not implemented + # 'hazen', # TODO(takagi) Not implemented + # 'weibull', # TODO(takagi) Not implemented "linear", - # "median_unbiased", # TODO(takagi) Not implemented - # "normal_unbiased", # TODO(takagi) Not implemented + # 'median_unbiased', # TODO(takagi) Not implemented + # 'normal_unbiased', # TODO(takagi) Not implemented "lower", "higher", "midpoint", - # "nearest", # TODO(hvy): Not implemented + "nearest", ) @@ -27,9 +27,61 @@ def for_all_methods(name="method"): return pytest.mark.parametrize(name, _all_methods) 
+@pytest.mark.skip("dpnp.quantile() is not implemented yet") @testing.with_requires("numpy>=1.22.0rc1") -class TestOrder: - @for_all_methods() +class TestQuantile: + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_percentile_unexpected_method(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_random((4, 2, 3, 2), xp, dtype) + q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) + with pytest.raises(ValueError): + xp.percentile(a, q, axis=-1, method="deadbeef") + + # See gh-4453 + @testing.for_float_dtypes() + def test_percentile_memory_access(self, dtype): + # Create an allocator that guarantees array allocated in + # cupy.percentile call will be followed by a NaN + original_allocator = cuda.get_allocator() + + def controlled_allocator(size): + memptr = original_allocator(size) + base_size = memptr.mem.size + assert base_size % 512 == 0 + item_size = dtype().itemsize + shape = (base_size // item_size,) + x = cupy.ndarray(memptr=memptr, shape=shape, dtype=dtype) + x.fill(cupy.nan) + return memptr + + # Check that percentile still returns non-NaN results + a = testing.shaped_random((5,), cupy, dtype) + q = cupy.array((0, 100), dtype=dtype) + + cuda.set_allocator(controlled_allocator) + try: + percentiles = cupy.percentile(a, q, axis=None, method="linear") + finally: + cuda.set_allocator(original_allocator) + + assert not cupy.any(cupy.isnan(percentiles)) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_quantile_unexpected_method(self, dtype): + for xp in (numpy, cupy): + a = testing.shaped_random((4, 2, 3, 2), xp, dtype) + q = testing.shaped_random((5,), xp, dtype=dtype, scale=1) + with pytest.raises(ValueError): + xp.quantile(a, q, axis=-1, method="deadbeef") + + +@pytest.mark.skip("dpnp.quantile() is not implemented yet") +@testing.with_requires("numpy>=2.0") +@for_all_methods() +class TestQuantileMethods: + @testing.for_all_dtypes(no_float16=True, no_bool=True, 
no_complex=True) @testing.numpy_cupy_allclose() def test_percentile_defaults(self, xp, dtype, method): @@ -37,7 +89,6 @@ def test_percentile_defaults(self, xp, dtype, method): q = testing.shaped_random((3,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() def test_percentile_q_list(self, xp, dtype, method): @@ -45,7 +96,6 @@ def test_percentile_q_list(self, xp, dtype, method): q = [99, 99.9] return xp.percentile(a, q, method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_percentile_no_axis(self, xp, dtype, method): @@ -53,7 +103,6 @@ def test_percentile_no_axis(self, xp, dtype, method): q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, axis=None, method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_percentile_neg_axis(self, xp, dtype, method): @@ -61,7 +110,6 @@ def test_percentile_neg_axis(self, xp, dtype, method): q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, axis=-1, method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6) def test_percentile_tuple_axis(self, xp, dtype, method): @@ -69,7 +117,6 @@ def test_percentile_tuple_axis(self, xp, dtype, method): q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, axis=(0, 1, 2), method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose() def test_percentile_scalar_q(self, xp, dtype, method): @@ -77,7 +124,6 @@ def test_percentile_scalar_q(self, xp, dtype, method): q = 13.37 return xp.percentile(a, 
q, method=method) - @for_all_methods() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-5) def test_percentile_keepdims(self, xp, dtype, method): @@ -85,7 +131,6 @@ def test_percentile_keepdims(self, xp, dtype, method): q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, axis=None, keepdims=True, method=method) - @for_all_methods() @testing.for_float_dtypes(no_float16=True) # NumPy raises error on int8 @testing.numpy_cupy_allclose(rtol=1e-6) def test_percentile_out(self, xp, dtype, method): @@ -94,7 +139,17 @@ def test_percentile_out(self, xp, dtype, method): out = testing.shaped_random((5, 10, 2, 3), xp, dtype) return xp.percentile(a, q, axis=-1, method=method, out=out) - @for_all_methods() + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_percentile_overwrite(self, xp, dtype, method): + a = testing.shaped_random((10, 2, 3, 2), xp, dtype) + ap = a.copy() + q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) + res = xp.percentile(ap, q, axis=-1, method=method, overwrite_input=True) + + assert not xp.all(ap == a) + return res + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) def test_percentile_bad_q(self, dtype, method): for xp in (numpy, cupy): @@ -104,12 +159,101 @@ def test_percentile_bad_q(self, dtype, method): xp.percentile(a, q, axis=-1, method=method) @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) - def test_percentile_unxpected_method(self, dtype): + def test_percentile_out_of_range_q(self, dtype, method): for xp in (numpy, cupy): a = testing.shaped_random((4, 2, 3, 2), xp, dtype) - q = testing.shaped_random((5,), xp, dtype=dtype, scale=100) + for q in [[-0.1], [100.1]]: + with pytest.raises(ValueError): + xp.percentile(a, q, axis=-1, method=method) + + @testing.for_all_dtypes() + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + 
@testing.numpy_cupy_allclose() + def test_quantile_defaults(self, xp, dtype, method): + a = testing.shaped_random((2, 3, 8), xp, dtype) + q = testing.shaped_random((3,), xp, scale=1) + return xp.quantile(a, q, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose() + def test_quantile_q_list(self, xp, dtype, method): + a = testing.shaped_arange((1001,), xp, dtype) + q = [0.99, 0.999] + return xp.quantile(a, q, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose(rtol=1e-5) + def test_quantile_no_axis(self, xp, dtype, method): + a = testing.shaped_random((10, 2, 4, 8), xp, dtype) + q = testing.shaped_random((5,), xp, scale=1) + return xp.quantile(a, q, axis=None, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_quantile_neg_axis(self, xp, dtype, method): + a = testing.shaped_random((4, 3, 10, 2, 8), xp, dtype) + q = testing.shaped_random((5,), xp, scale=1) + return xp.quantile(a, q, axis=-1, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_quantile_tuple_axis(self, xp, dtype, method): + a = testing.shaped_random((1, 6, 3, 2), xp, dtype) + q = testing.shaped_random((5,), xp, scale=1) + return xp.quantile(a, q, axis=(0, 1, 2), method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose() + def test_quantile_scalar_q(self, xp, dtype, method): + a = testing.shaped_random((2, 3, 8), xp, dtype) + q = 0.1337 + return xp.quantile(a, q, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose(rtol=1e-5) + def test_quantile_keepdims(self, xp, dtype, method): + a = testing.shaped_random((7, 2, 9, 2), xp, dtype) + q = 
testing.shaped_random((5,), xp, scale=1) + return xp.quantile(a, q, axis=None, keepdims=True, method=method) + + @testing.for_float_dtypes(no_float16=True) # NumPy raises error on int8 + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_quantile_out(self, xp, dtype, method): + a = testing.shaped_random((10, 2, 3, 2), xp, dtype) + q = testing.shaped_random((5,), xp, dtype=dtype, scale=1) + out = testing.shaped_random((5, 10, 2, 3), xp, dtype) + return xp.quantile(a, q, axis=-1, method=method, out=out) + + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose(rtol=1e-6) + def test_quantile_overwrite(self, xp, dtype, method): + a = testing.shaped_random((10, 2, 3, 2), xp, dtype) + ap = a.copy() + q = testing.shaped_random((5,), xp, dtype=dtype, scale=1) + + res = xp.quantile(a, q, axis=-1, method=method, overwrite_input=True) + + assert not xp.all(ap == a) + return res + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_quantile_bad_q(self, dtype, method): + for xp in (numpy, cupy): + a = testing.shaped_random((4, 2, 3, 2), xp, dtype) + q = testing.shaped_random((1, 2, 3), xp, dtype=dtype, scale=1) with pytest.raises(ValueError): - xp.percentile(a, q, axis=-1, method="deadbeef") + xp.quantile(a, q, axis=-1, method=method) + + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + def test_quantile_out_of_range_q(self, dtype, method): + for xp in (numpy, cupy): + a = testing.shaped_random((4, 2, 3, 2), xp, dtype) + for q in [[-0.1], [1.1]]: + with pytest.raises(ValueError): + xp.quantile(a, q, axis=-1, method=method) + + +class TestOrder: @testing.for_all_dtypes(no_complex=True) @testing.numpy_cupy_allclose() @@ -117,25 +261,25 @@ def test_nanmax_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return xp.nanmax(a) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_axis_large(self, xp, dtype): a = 
testing.shaped_random((3, 1000), xp, dtype) return xp.nanmax(a, axis=0) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_axis0(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return xp.nanmax(a, axis=0) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_axis1(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return xp.nanmax(a, axis=1) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmax_axis2(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -159,31 +303,31 @@ def test_nanmax_all_nan(self, xp, dtype): assert w[0].category is RuntimeWarning return m - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_all(self, xp, dtype): a = testing.shaped_random((2, 3), xp, dtype) return xp.nanmin(a) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_axis_large(self, xp, dtype): a = testing.shaped_random((3, 1000), xp, dtype) return xp.nanmin(a, axis=0) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_axis0(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return xp.nanmin(a, axis=0) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_axis1(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) return xp.nanmin(a, axis=1) - @testing.for_all_dtypes(no_complex=True) + @testing.for_all_dtypes() @testing.numpy_cupy_allclose() def test_nanmin_axis2(self, xp, dtype): a = testing.shaped_random((2, 3, 4), xp, dtype) @@ -248,3 +392,39 @@ def test_ptp_nan(self, xp, dtype): def test_ptp_all_nan(self, xp, dtype): a = xp.array([float("nan"), 
float("nan")], dtype) return xp.ptp(a) + + +# See gh-4607 +# "Magic" values used in this test were empirically found to result in +# non-monotonicity for less accurate linear interpolation formulas +@pytest.mark.skip("dpnp.percentile() is not implemented yet") +@testing.parameterize( + *testing.product( + { + "magic_value": ( + -29, + -53, + -207, + -16373, + -99999, + ) + } + ) +) +class TestPercentileMonotonic: + + @testing.with_requires("numpy>=1.22.0rc1") + @testing.for_float_dtypes(no_float16=True) + @testing.numpy_cupy_allclose() + def test_percentile_monotonic(self, dtype, xp): + a = testing.shaped_random((5,), xp, dtype) + + a[0] = self.magic_value + a[1] = self.magic_value + q = xp.linspace(0, 100, 21) + percentiles = xp.percentile(a, q, method="linear") + + # Assert that percentile output increases monotonically + assert xp.all(xp.diff(percentiles) >= 0) + + return percentiles diff --git a/dpnp/tests/third_party/cupy/test_init.py b/dpnp/tests/third_party/cupy/test_init.py new file mode 100644 index 000000000000..dbda6010e122 --- /dev/null +++ b/dpnp/tests/third_party/cupy/test_init.py @@ -0,0 +1,172 @@ +import operator +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from unittest import mock + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + + +def _run_script(code): + # subprocess is required not to interfere with cupy module imported in top + # of this file + temp_dir = tempfile.mkdtemp() + try: + script_path = os.path.join(temp_dir, "script.py") + with open(script_path, "w") as f: + f.write(code) + proc = subprocess.Popen( + [sys.executable, script_path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdoutdata, stderrdata = proc.communicate() + finally: + shutil.rmtree(temp_dir, ignore_errors=True) + return proc.returncode, stdoutdata, stderrdata + + +def _test_cupy_available(self): + returncode, stdoutdata, stderrdata = _run_script( + """ 
+import dpnp as cupy +print(cupy.is_available())""" + ) + assert returncode == 0, "stderr: {!r}".format(stderrdata) + assert stdoutdata in (b"True\n", b"True\r\n", b"False\n", b"False\r\n") + return stdoutdata == b"True\n" or stdoutdata == b"True\r\n" + + +class TestImportError(unittest.TestCase): + + def test_import_error(self): + returncode, stdoutdata, stderrdata = _run_script( + """ +try: + import dpnp as cupy +except Exception as e: + print(type(e).__name__) +""" + ) + assert returncode == 0, "stderr: {!r}".format(stderrdata) + assert stdoutdata in (b"", b"RuntimeError\n") + + +# if not cupy.cuda.runtime.is_hip: +# visible = "CUDA_VISIBLE_DEVICES" +# else: +# visible = "HIP_VISIBLE_DEVICES" + + +@pytest.mark.skip("dpnp.is_available() is not implemented") +class TestAvailable(unittest.TestCase): + + def test_available(self): + available = _test_cupy_available(self) + assert available + + +@pytest.mark.skip("dpnp.is_available() is not implemented") +class TestNotAvailable(unittest.TestCase): + + def setUp(self): + self.old = os.environ.get(visible) + + def tearDown(self): + if self.old is None: + os.environ.pop(visible) + else: + os.environ[visible] = self.old + + # @unittest.skipIf( + # cupy.cuda.runtime.is_hip, + # "HIP handles empty HIP_VISIBLE_DEVICES differently", + # ) + def test_no_device_1(self): + os.environ["CUDA_VISIBLE_DEVICES"] = " " + available = _test_cupy_available(self) + assert not available + + def test_no_device_2(self): + os.environ[visible] = "-1" + available = _test_cupy_available(self) + assert not available + + +@pytest.mark.skip("No memory pool API is supported") +class TestMemoryPool(unittest.TestCase): + + def test_get_default_memory_pool(self): + p = cupy.get_default_memory_pool() + assert isinstance(p, cupy.cuda.memory.MemoryPool) + + def test_get_default_pinned_memory_pool(self): + p = cupy.get_default_pinned_memory_pool() + assert isinstance(p, cupy.cuda.pinned_memory.PinnedMemoryPool) + + +@pytest.mark.skip("dpnp.show_config() is 
not implemented") +class TestShowConfig(unittest.TestCase): + + def test_show_config(self): + with mock.patch("sys.stdout.write") as write_func: + cupy.show_config() + write_func.assert_called_once_with( + str(cupyx.get_runtime_info(full=False)) + ) + + def test_show_config_with_handles(self): + with mock.patch("sys.stdout.write") as write_func: + cupy.show_config(_full=True) + write_func.assert_called_once_with( + str(cupyx.get_runtime_info(full=True)) + ) + + +class TestAliases(unittest.TestCase): + + def test_abs_is_absolute(self): + for xp in (numpy, cupy): + assert xp.abs is xp.absolute + + def test_conj_is_conjugate(self): + for xp in (numpy, cupy): + assert xp.conj is xp.conjugate + + def test_bitwise_not_is_invert(self): + for xp in (numpy, cupy): + assert xp.bitwise_not is xp.invert + + +@pytest.mark.skip("dpnp.exceptions is not implemented") +@testing.with_requires("numpy>=2.0") +@pytest.mark.parametrize( + "name", + [ + "exceptions.AxisError", + "exceptions.ComplexWarning", + "exceptions.ModuleDeprecationWarning", + "exceptions.RankWarning", + "exceptions.TooHardError", + "exceptions.VisibleDeprecationWarning", + "linalg.LinAlgError", + ], +) +def test_error_classes(name): + get = operator.attrgetter(name) + assert issubclass(get(cupy), get(numpy)) + + +# This is copied from chainer/testing/__init__.py, so should be replaced in +# some way. 
+if __name__ == "__main__": + import pytest + + pytest.main([__file__, "-vvs", "-x", "--pdb"]) diff --git a/dpnp/tests/third_party/cupy/test_ndim.py b/dpnp/tests/third_party/cupy/test_ndim.py index 13fd811a9904..02f103461f74 100644 --- a/dpnp/tests/third_party/cupy/test_ndim.py +++ b/dpnp/tests/third_party/cupy/test_ndim.py @@ -7,6 +7,7 @@ class TestNdim(unittest.TestCase): + @testing.numpy_cupy_equal() def test_ndim_ndarray1d(self, xp): return xp.ndim(xp.arange(5)) diff --git a/dpnp/tests/third_party/cupy/test_numpy_interop.py b/dpnp/tests/third_party/cupy/test_numpy_interop.py new file mode 100644 index 000000000000..0409c3fdaadc --- /dev/null +++ b/dpnp/tests/third_party/cupy/test_numpy_interop.py @@ -0,0 +1,195 @@ +import contextlib +import os +import unittest + +import numpy +import pytest + +import dpnp as cupy +from dpnp.tests.third_party.cupy import testing + +# import cupyx + +try: + import scipy.sparse + + scipy_available = True +except ImportError: + scipy_available = False + + +@pytest.mark.skip("dpnp.get_array_module() is not supported") +class TestGetArrayModule(unittest.TestCase): + + def test_get_array_module_1(self): + n1 = numpy.array([2], numpy.float32) + c1 = cupy.array([2], numpy.float32) + csr1 = cupyx.scipy.sparse.csr_matrix((5, 3), dtype=numpy.float32) + + assert numpy is cupy.get_array_module() + assert numpy is cupy.get_array_module(n1) + assert cupy is cupy.get_array_module(c1) + assert cupy is cupy.get_array_module(csr1) + + assert numpy is cupy.get_array_module(n1, n1) + assert cupy is cupy.get_array_module(c1, c1) + assert cupy is cupy.get_array_module(csr1, csr1) + + assert cupy is cupy.get_array_module(n1, csr1) + assert cupy is cupy.get_array_module(csr1, n1) + assert cupy is cupy.get_array_module(c1, n1) + assert cupy is cupy.get_array_module(n1, c1) + assert cupy is cupy.get_array_module(c1, csr1) + assert cupy is cupy.get_array_module(csr1, c1) + + if scipy_available: + csrn1 = scipy.sparse.csr_matrix((5, 3), dtype=numpy.float32) 
+ + assert numpy is cupy.get_array_module(csrn1) + assert cupy is cupy.get_array_module(csrn1, csr1) + assert cupy is cupy.get_array_module(csr1, csrn1) + assert cupy is cupy.get_array_module(c1, csrn1) + assert cupy is cupy.get_array_module(csrn1, c1) + assert numpy is cupy.get_array_module(n1, csrn1) + assert numpy is cupy.get_array_module(csrn1, n1) + + +class MockArray(numpy.lib.mixins.NDArrayOperatorsMixin): + __array_priority__ = 20 # less than cupy.ndarray.__array_priority__ + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + assert method == "__call__" + name = ufunc.__name__ + return name, inputs, kwargs + + +@pytest.mark.skip("dpnp.__array_ufunc__ is not supported") +class TestArrayUfunc: + + def test_add(self): + x = cupy.array([3, 7]) + y = MockArray() + assert x + y == ("add", (x, y), {}) + assert y + x == ("add", (y, x), {}) + y2 = y + y2 += x + assert y2 == ("add", (y, x), {"out": y}) + with pytest.raises(TypeError): + x += y + + @pytest.mark.xfail( + reason="cupy.ndarray.__array_ufunc__ does not support gufuncs yet" + ) + def test_matmul(self): + x = cupy.array([3, 7]) + y = MockArray() + assert x @ y == ("matmul", (x, y), {}) + assert y @ x == ("matmul", (y, x), {}) + y2 = y + y2 @= x + assert y2 == ("matmul", (y, x), {"out": y}) + with pytest.raises(TypeError): + x @= y + + def test_lt(self): + x = cupy.array([3, 7]) + y = MockArray() + assert (x < y) == ("less", (x, y), {}) + assert (y < x) == ("less", (y, x), {}) + + +class MockArray2: + __array_ufunc__ = None + + def __add__(self, other): + return "add" + + def __radd__(self, other): + return "radd" + + def __matmul__(self, other): + return "matmul" + + def __rmatmul__(self, other): + return "rmatmul" + + def __lt__(self, other): + return "lt" + + def __gt__(self, other): + return "gt" + + +@pytest.mark.skip("dpnp.__array_ufunc__ is not supported") +class TestArrayUfuncOptout: + + def test_add(self): + x = cupy.array([3, 7]) + y = MockArray2() + assert x + y == "radd" + assert y 
+ x == "add" + + def test_matmul(self): + x = cupy.array([3, 7]) + y = MockArray2() + assert x @ y == "rmatmul" + assert y @ x == "matmul" + + def test_lt(self): + x = cupy.array([3, 7]) + y = MockArray2() + assert (x < y) == "gt" + assert (y < x) == "lt" + + +class TestAsnumpy: + + def test_asnumpy(self): + x = testing.shaped_random((2, 3, 4), cupy, cupy.default_float_type()) + y = cupy.asnumpy(x) + testing.assert_array_equal(x, y) + + @pytest.mark.skip("out keyword is not supported") + def test_asnumpy_out(self): + x = testing.shaped_random((2, 3, 4), cupy, cupy.float64) + y = cupyx.empty_like_pinned(x) + y = cupy.asnumpy(x, out=y) + testing.assert_array_equal(x, y) + assert isinstance(y.base, cupy.cuda.PinnedMemoryPointer) + assert y.base.ptr == y.ctypes.data + + @pytest.mark.skip("blocking keyword is not supported") + @pytest.mark.skipif( + int(os.environ.get("CUPY_ENABLE_UMP", 0)) == 1, + reason="blocking or not is irrelevant when zero-copy is on", + ) + @pytest.mark.parametrize("blocking", (True, False)) + def test_asnumpy_blocking(self, blocking): + prefactor = 4 + a = cupy.random.random( + prefactor * 128 * 1024 * 1024, dtype=cupy.float64 + ) + cupy.cuda.Device().synchronize() + + # Idea: perform D2H copy on a nonblocking stream, during which we try + # to "corrupt" the host data via NumPy operation. If the copy is + # properly ordered, corruption would not be possible. Here we craft a + # problem size and use pinned memory to ensure the failure can be + # always triggered. (The CUDART API reference ("API synchronization + # behavior") states that copying between device and pageable memory + # "might be" synchronous, whereas between device and page-locked + # memory "should be" fully asynchronous.) 
+ s = cupy.cuda.Stream(non_blocking=True) + with s: + c = cupyx.empty_pinned(a.shape, dtype=a.dtype) + cupy.asnumpy(a, out=c, blocking=blocking) + c[c.size // 2 :] = -1.0 # potential data race + s.synchronize() + + a[c.size // 2 :] = -1.0 + if not blocking: + ctx = pytest.raises(AssertionError) + else: + ctx = contextlib.nullcontext() + with ctx: + assert cupy.allclose(a, c) diff --git a/dpnp/tests/third_party/cupy/test_type_routines.py b/dpnp/tests/third_party/cupy/test_type_routines.py index c9ebfeb950c3..9e59baa7971d 100644 --- a/dpnp/tests/third_party/cupy/test_type_routines.py +++ b/dpnp/tests/third_party/cupy/test_type_routines.py @@ -31,6 +31,7 @@ def _generate_type_routines_input(xp, dtype, obj_type): ) ) class TestCanCast(unittest.TestCase): + @testing.for_all_dtypes_combination(names=("from_dtype", "to_dtype")) @testing.numpy_cupy_equal() def test_can_cast(self, xp, from_dtype, to_dtype): @@ -48,10 +49,11 @@ def test_can_cast(self, xp, from_dtype, to_dtype): @pytest.mark.skip("dpnp.common_type() is not implemented yet") class TestCommonType(unittest.TestCase): + @testing.numpy_cupy_equal() def test_common_type_empty(self, xp): ret = xp.common_type() - assert type(ret) == type + assert type(ret) is type return ret @testing.for_all_dtypes(no_bool=True) @@ -59,7 +61,7 @@ def test_common_type_empty(self, xp): def test_common_type_single_argument(self, xp, dtype): array = _generate_type_routines_input(xp, dtype, "array") ret = xp.common_type(array) - assert type(ret) == type + assert type(ret) is type return ret @testing.for_all_dtypes_combination( @@ -70,7 +72,7 @@ def test_common_type_two_arguments(self, xp, dtype1, dtype2): array1 = _generate_type_routines_input(xp, dtype1, "array") array2 = _generate_type_routines_input(xp, dtype2, "array") ret = xp.common_type(array1, array2) - assert type(ret) == type + assert type(ret) is type return ret @testing.for_all_dtypes() @@ -91,6 +93,7 @@ def test_common_type_bool(self, dtype): ) ) class 
TestResultType(unittest.TestCase): + @testing.for_all_dtypes_combination(names=("dtype1", "dtype2")) @testing.numpy_cupy_equal() def test_result_type(self, xp, dtype1, dtype2): diff --git a/dpnp/tests/third_party/cupy/test_typing.py b/dpnp/tests/third_party/cupy/test_typing.py new file mode 100644 index 000000000000..2256ff4ab82c --- /dev/null +++ b/dpnp/tests/third_party/cupy/test_typing.py @@ -0,0 +1,12 @@ +import pytest + +import dpnp as cupy + + +@pytest.mark.skip("dpnp.typing is not implemented yet") +class TestClassGetItem: + + def test_class_getitem(self): + from typing import Any + + cupy.ndarray[Any, Any] diff --git a/dpnp/tests/third_party/cupy/testing/_loops.py b/dpnp/tests/third_party/cupy/testing/_loops.py index aea6e77e042c..901d1111b0be 100644 --- a/dpnp/tests/third_party/cupy/testing/_loops.py +++ b/dpnp/tests/third_party/cupy/testing/_loops.py @@ -1229,9 +1229,9 @@ def for_dtypes_combination(types, names=("dtype",), full=None): """ types = list(types) - if len(types) == 1: - (name,) = names - return for_dtypes(types, name) + # if len(types) == 1: + # (name,) = names + # return for_dtypes(types, name) if full is None: full = int(os.environ.get("CUPY_TEST_FULL_COMBINATION", "0")) != 0