26 | 26 | set_default_dtype, |
27 | 27 | set_default_tensor_type, |
28 | 28 | TEST_SCIPY, |
29 | | - IS_MACOS, |
30 | 29 | IS_PPC, |
31 | | - IS_JETSON, |
32 | 30 | IS_WINDOWS, |
33 | 31 | IS_FBCODE, |
34 | 32 | IS_SANDCASTLE, |
@@ -1051,8 +1049,6 @@ def _float_to_int_conversion_helper(self, vals, device, dtype, refs=None): |
1051 | 1049 | # errors with UBSAN. These casts are deliberate in PyTorch, however, and |
1052 | 1050 | # NumPy may have the same behavior. |
1053 | 1051 | @onlyNativeDeviceTypes |
1054 | | - @unittest.skipIf(IS_MACOS or IS_JETSON, "Test is broken on MacOS and Jetson, \ |
1055 | | - see https://github.com/pytorch/pytorch/issues/38752") |
1056 | 1052 | @unittest.skipIf(IS_PPC, "Test is broken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671") |
1057 | 1053 | @dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64) |
1058 | 1054 | def test_float_to_int_conversion_finite(self, device, dtype): |
@@ -1081,10 +1077,10 @@ def test_float_to_int_conversion_finite(self, device, dtype): |
1081 | 1077 | self._float_to_int_conversion_helper(vals, device, dtype, refs) |
1082 | 1078 |
1083 | 1079 | # Note: CUDA will fail this test on most dtypes, often dramatically. |
| 1080 | + # Note: This test validates undefined-behavior consistency in float-to-int casts.
1084 | 1081 | # NB: torch.uint16, torch.uint32, torch.uint64 excluded as this |
1085 | 1082 | # nondeterministically fails, warning "invalid value encountered in cast" |
1086 | 1083 | @onlyCPU |
1087 | | - @unittest.skipIf(IS_MACOS, "Nonfinite conversion results on MacOS are different from others.") |
1088 | 1084 | @unittest.skipIf(IS_S390X, "Test fails for int16 on s390x. Needs investigation.") |
1089 | 1085 | @dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64) |
1090 | 1086 | def test_float_to_int_conversion_nonfinite(self, device, dtype): |
|
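For context, here is a minimal standalone sketch (not the actual test body; the values and dtype pairs are chosen purely for illustration) of the property both tests exercise via _float_to_int_conversion_helper: casting a float tensor to an integer dtype in PyTorch should agree with NumPy's cast of the same values. The nonfinite variant runs the same comparison on inf/nan inputs, where the underlying C++ cast is undefined behavior, which is why it is pinned to CPU.

import numpy as np
import torch

# Finite float values whose integer casts are well defined (truncation toward zero).
vals = (-2.0, -1.5, -0.5, 0.0, 0.5, 1.5, 2.0)
for np_dtype, torch_dtype in ((np.int32, torch.int32), (np.int64, torch.int64)):
    # NumPy reference: cast the float32 values to the target integer dtype.
    refs = torch.from_numpy(np.array(vals, dtype=np.float32).astype(np_dtype))
    # PyTorch result of the same cast.
    res = torch.tensor(vals, dtype=torch.float32).to(torch_dtype)
    assert torch.equal(res, refs)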