Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions dpnp/tests/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -346,6 +346,14 @@ def is_cuda_device(device=None):
return dev.backend == dpctl.backend_type.cuda


def is_gpu_device(device=None):
    """
    Return True if a test is running on GPU device, False otherwise.

    If *device* is ``None``, the default SYCL device selected by dpctl
    is checked; otherwise the given device is checked directly.
    """
    if device is None:
        device = dpctl.select_default_device()
    return device.has_aspect_gpu


def is_win_platform():
"""
Return True if a test is running on Windows OS, False otherwise.
Expand Down
7 changes: 0 additions & 7 deletions dpnp/tests/skipped_tests.tbl
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,3 @@ tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.
tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray(x).astype(dpnp.int8)]

tests/third_party/intel/test_zero_copy_test1.py::test_dpnp_interaction_with_dpctl_memory

tests/test_umath.py::test_umaths[('divmod', 'ii')]
tests/test_umath.py::test_umaths[('divmod', 'll')]
tests/test_umath.py::test_umaths[('divmod', 'ff')]
tests/test_umath.py::test_umaths[('divmod', 'dd')]
tests/test_umath.py::test_umaths[('frexp', 'f')]
tests/test_umath.py::test_umaths[('frexp', 'd')]
2 changes: 0 additions & 2 deletions dpnp/tests/skipped_tests_cuda.tbl
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@
tests/test_arithmetic.py::TestArithmetic::test_modf_part1
tests/test_arithmetic.py::TestArithmetic::test_modf_part2
tests/test_sycl_queue.py::test_modf[cuda:gpu:0]
tests/test_umath.py::test_umaths[('modf', 'f')]
tests/test_umath.py::test_umaths[('modf', 'd')]
tests/third_party/cupy/math_tests/test_arithmetic.py::TestArithmeticModf::test_modf

# random
Expand Down
8 changes: 0 additions & 8 deletions dpnp/tests/skipped_tests_gpu.tbl
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,4 @@ tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: (dpnp
tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray([(i, i) for i in x], [("a", object), ("b", dpnp.int32)])]]
tests/test_random.py::TestPermutationsTestShuffle::test_shuffle1[lambda x: dpnp.asarray(x).astype(dpnp.int8)]

tests/test_umath.py::test_umaths[('divmod', 'ii')]
tests/test_umath.py::test_umaths[('divmod', 'll')]
tests/test_umath.py::test_umaths[('divmod', 'ff')]
tests/test_umath.py::test_umaths[('divmod', 'dd')]
tests/test_umath.py::test_umaths[('floor_divide', 'ff')]
tests/test_umath.py::test_umaths[('frexp', 'f')]
tests/test_umath.py::test_umaths[('frexp', 'd')]

tests/third_party/intel/test_zero_copy_test1.py::test_dpnp_interaction_with_dpctl_memory
1 change: 0 additions & 1 deletion dpnp/tests/skipped_tests_gpu_no_fp64.tbl

This file was deleted.

9 changes: 7 additions & 2 deletions dpnp/tests/test_random_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,12 @@
from dpnp.dpnp_array import dpnp_array
from dpnp.random import RandomState

from .helper import assert_dtype_allclose, get_array, is_cpu_device
from .helper import (
assert_dtype_allclose,
get_array,
is_cpu_device,
is_gpu_device,
)

# aspects of default device:
_def_device = dpctl.SyclQueue().sycl_device
Expand Down Expand Up @@ -688,7 +693,7 @@ def test_scalar(self, func):
],
)
def test_array_range(self, seed):
if not is_cpu_device():
if is_gpu_device():
pytest.skip("seed as a scalar is only supported on GPU")

size = 15
Expand Down
47 changes: 34 additions & 13 deletions dpnp/tests/test_umath.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
)

import dpnp
import dpnp.backend.extensions.vm._vm_impl as vmi
from dpnp.dpnp_utils import map_dtype_to_device

from .helper import (
Expand All @@ -20,21 +21,19 @@
get_float_dtypes,
has_support_aspect16,
has_support_aspect64,
is_cuda_device,
is_gpu_device,
)

# full list of umaths
umaths = [i for i in dir(numpy) if isinstance(getattr(numpy, i), numpy.ufunc)]

types = {
"d": numpy.float64,
"f": numpy.float32,
"l": numpy.int64,
"i": numpy.int32,
}

supported_types = "fli"
supported_types = "?bBhHiIlLkK"
if has_support_aspect16():
supported_types += "e"
supported_types += "fF"
if has_support_aspect64():
supported_types += "d"
supported_types += "dD"


def check_types(args_str):
Expand All @@ -55,7 +54,7 @@ def shaped_arange(shape, xp=numpy, dtype=numpy.float32):
def get_args(args_str, sh, xp=numpy):
args = []
for s in args_str:
args.append(shaped_arange(shape=sh, xp=xp, dtype=types[s]))
args.append(shaped_arange(shape=sh, xp=xp, dtype=numpy.dtype(s)))
return tuple(args)


Expand All @@ -75,6 +74,7 @@ def get_id(val):
return val.__str__()


@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
@pytest.mark.usefixtures("suppress_divide_invalid_numpy_warnings")
@pytest.mark.parametrize("test_cases", test_cases, ids=get_id)
def test_umaths(test_cases):
Expand All @@ -91,7 +91,7 @@ def test_umaths(test_cases):
iargs = get_args(args_str, sh, xp=dpnp)

if umath == "reciprocal":
if args[0].dtype in [numpy.int32, numpy.int64]:
if numpy.issubdtype(args[0].dtype, numpy.integer):
pytest.skip(
"For integer input array, numpy.reciprocal returns zero."
)
Expand All @@ -102,11 +102,32 @@ def test_umaths(test_cases):
and numpy.dtype("l") != numpy.int64
):
pytest.skip("numpy.ldexp doesn't have a loop for the input types")
elif (
umath == "floor_divide"
and args[0].dtype in [dpnp.float16, dpnp.float32]
and is_gpu_device()
):
pytest.skip("dpctl-1652")
elif umath in ["ceil", "floor", "trunc"] and args[0].dtype == dpnp.bool:
pytest.skip("dpctl-2030")
elif (
umath == "tan"
and dpnp.issubdtype(args[0].dtype, dpnp.complexfloating)
and not (vmi._is_available() and has_support_aspect64())
):
pytest.skip("dpctl-2031")
elif umath in ["divmod", "frexp"]:
pytest.skip("Not implemented umath")
elif umath == "modf":
if args[0].dtype == dpnp.float16:
pytest.skip("dpnp.modf is not supported with dpnp.float16")
elif is_cuda_device():
pytest.skip("dpnp.modf is not supported on CUDA device")

expected = getattr(numpy, umath)(*args)
result = getattr(dpnp, umath)(*iargs)

assert_allclose(result, expected, rtol=1e-6)
for x, y in zip(result, expected):
assert_dtype_allclose(x, y)


class TestArctan2:
Expand Down
Loading