|
8 | 8 | # RUN: %run_on_npu2% %pytest %s |
9 | 9 | # REQUIRES: xrt_python_bindings |
10 | 10 |
|
| 11 | +import pytest |
11 | 12 | import numpy as np |
12 | 13 | import tempfile |
13 | 14 | import os |
@@ -413,20 +414,27 @@ def test_cache_tensor_shapes(): |
413 | 414 | np.testing.assert_array_equal(result, expected) |
414 | 415 |
|
415 | 416 |
|
@pytest.mark.parametrize(
    "dtype",
    [
        np.int32,
        pytest.param(
            np.float32,
            marks=pytest.mark.xfail(
                reason="Suspected f32 kernel stack overflow when two runtime_sequence buffers map to same host-side buffer",
                strict=False,
            ),
        ),
    ],
)
def test_cache_tensor_dtypes(dtype):
    """Check that the caching path yields correct results for each tensor dtype."""
    tensor = iron.arange(32, dtype=dtype)

    # Drive the in-place "+1" kernel through the (cached) transform path,
    # using the same tensor as both input and output.
    transform(tensor, tensor, lambda v: v + 1)

    # Host-side reference: an identical arange incremented with plain NumPy
    # must match what came back from the device.
    np.testing.assert_array_equal(tensor.numpy(), np.arange(32, dtype=dtype) + 1)
0 commit comments