
Commit 89423f3

Fix warnings issue in test_tensor_with_grad_to_scalar_warning (#2386)
This PR cherry-picks the test_torch.py changes from upstream [22d1359](pytorch@22d1359). It fixes the following failure:

```
_____________ TestTorch.test_tensor_with_grad_to_scalar_warning _____________
Traceback (most recent call last):
  File "/root/PR/pytorch/test/test_torch.py", line 10849, in test_tensor_with_grad_to_scalar_warning
    self.assertEqual(len(w), 1)
  File "/opt/venv/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py", line 4114, in assertEqual
    raise error_metas.pop()[0].to_error(  # type: ignore[index]
AssertionError: Scalars are not equal!

Expected 1 but got 2.
Absolute difference: 1
Relative difference: 1.0

To execute this test, run the following from the base repo dir:
    python test/test_torch.py TestTorch.test_tensor_with_grad_to_scalar_warning

This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0
```

Fixes https://ontrack-internal.amd.com/browse/SWDEV-532432
1 parent a97f45c commit 89423f3

File tree: test/test_torch.py

1 file changed: +3 -6 lines changed

test/test_torch.py (3 additions, 6 deletions)

@@ -43,7 +43,7 @@
     skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
     wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
     bytes_to_scalar, parametrize, skipIfMPS, noncontiguous_like,
-    AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
+    AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo, set_warn_always_context)
 from multiprocessing.reduction import ForkingPickler
 from torch.testing._internal.common_device_type import (
     expectedFailureMeta,
@@ -10830,8 +10830,8 @@ def test_bf16_supported_on_cpu(self):
         self.assertFalse(torch.cuda.is_bf16_supported())
 
     def test_tensor_with_grad_to_scalar_warning(self) -> None:
-
-        with warnings.catch_warnings(record=True) as w:
+        with (warnings.catch_warnings(record=True) as w,
+              set_warn_always_context(True)):
             warnings.simplefilter("always")
 
             x = torch.tensor(2.0, requires_grad=True)
@@ -10844,9 +10844,6 @@ def test_tensor_with_grad_to_scalar_warning(self) -> None:
                 str(w[0].message)
             )
 
-            _ = math.pow(x, 3)  # calling it again does not result in a second warning
-            self.assertEqual(len(w), 1)
-
 # The following block extends TestTorch with negative dim wrapping tests
 # FIXME: replace these with OpInfo sample inputs or systemic OpInfo tests
 # Functions to test negative dimension wrapping
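For reference, here is a minimal sketch of the behavior the patched test relies on. It is not part of this commit; it assumes a PyTorch build where converting a `requires_grad=True` tensor to a Python scalar emits a warning, and it uses the `set_warn_always_context` helper that the cherry-picked import pulls in from `torch.testing._internal.common_utils`.

```python
# Minimal sketch (not from this commit): why the test wraps
# warnings.catch_warnings with set_warn_always_context(True).
import math
import warnings

import torch
from torch.testing._internal.common_utils import set_warn_always_context

x = torch.tensor(2.0, requires_grad=True)

# With warn-always enabled, PyTorch re-emits "warn once" style warnings on
# every call, so the number of recorded warnings no longer depends on what
# earlier code in the same process already triggered.
with warnings.catch_warnings(record=True) as w, set_warn_always_context(True):
    warnings.simplefilter("always")
    _ = math.pow(x, 3)  # implicit tensor-to-scalar conversion should warn here
    print(len(w), str(w[0].message) if w else "no warning recorded")
```

Under warn-always, a repeated `math.pow(x, 3)` call can legitimately record a second warning, which is consistent with the upstream change dropping the old `len(w) == 1` assertion after the second call.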
