Skip to content

Commit 1cdd665

Browse files
yuchengliu1 authored and pytorchmergebot committed
fix test_verbose_logs_dynamic_shapes with MSVC (pytorch#159573)
Operator `typeid` has different outputs in different compilers. There is a good example in [cppreference](https://en.cppreference.com/w/cpp/language/typeid.html). Pull Request resolved: pytorch#159573 Approved by: https://github.com/angelayi, https://github.com/jansel
1 parent 7cb2dcd commit 1cdd665

File tree

1 file changed

+21
-6
lines changed

1 file changed

+21
-6
lines changed

test/inductor/test_compiled_autograd.py

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
from torch._dynamo.testing import normalize_gm
3030
from torch._dynamo.utils import counters
3131
from torch._inductor import config as inductor_config
32+
from torch._inductor.cpp_builder import is_msvc_cl
3233
from torch._inductor.test_case import run_tests, TestCase
3334
from torch.nn.attention.flex_attention import flex_attention
3435
from torch.nn.parallel import DistributedDataParallel as DDP
@@ -40,6 +41,7 @@
4041
from torch.testing._internal.common_utils import (
4142
instantiate_parametrized_tests,
4243
IS_S390X,
44+
IS_WINDOWS,
4345
parametrize,
4446
scoped_load_inline,
4547
skipIfWindows,
@@ -193,6 +195,18 @@ def model(i):
193195
for _ in range(3):
194196
self.run_as_subprocess(script)
195197

198+
def gen_cache_miss_log_prefix(self):
199+
if IS_WINDOWS:
200+
if is_msvc_cl():
201+
return "Cache miss due to new autograd node: struct "
202+
else:
203+
self.fail(
204+
"Compilers other than msvc have not yet been verified on Windows."
205+
)
206+
return ""
207+
else:
208+
return "Cache miss due to new autograd node: "
209+
196210
def test_reset(self):
197211
compiled_autograd.compiled_autograd_enabled = True
198212
torch._C._dynamo.compiled_autograd.set_autograd_compiler(lambda: None, True)
@@ -3146,7 +3160,7 @@ def test_logs(self):
31463160
self.assertEqual(counters["compiled_autograd"]["compiles"], 1)
31473161
assert "torch::autograd::AccumulateGrad (NodeCall" in logs.getvalue()
31483162
assert (
3149-
"Cache miss due to new autograd node: torch::autograd::GraphRoot"
3163+
self.gen_cache_miss_log_prefix() + "torch::autograd::GraphRoot"
31503164
not in logs.getvalue()
31513165
)
31523166

@@ -3353,7 +3367,6 @@ def fn(x, obj):
33533367
sum(1 for e in expected_logs if e in logs.getvalue()), len(expected_logs)
33543368
)
33553369

3356-
@skipIfWindows(msg="AssertionError: Scalars are not equal!")
33573370
def test_verbose_logs_cpp(self):
33583371
torch._logging.set_logs(compiled_autograd_verbose=True)
33593372

@@ -3381,8 +3394,9 @@ def fn():
33813394
self.check_output_and_recompiles(fn)
33823395

33833396
patterns1 = [
3384-
r".*Cache miss due to new autograd node: torch::autograd::GraphRoot \(NodeCall 0\) with key size (\d+), "
3385-
r"previous key sizes=\[\]\n",
3397+
r".*"
3398+
+ self.gen_cache_miss_log_prefix()
3399+
+ r"torch::autograd::GraphRoot \(NodeCall 0\) with key size (\d+), previous key sizes=\[\]\n",
33863400
]
33873401

33883402
all_logs = logs.getvalue()
@@ -3420,7 +3434,8 @@ def test_verbose_logs_dynamic_shapes(self):
34203434

34213435
actual_logs = logs.getvalue()
34223436
expected_logs = [
3423-
"Cache miss due to new autograd node: torch::autograd::GraphRoot (NodeCall 0) with key size 39, previous key sizes=[]",
3437+
self.gen_cache_miss_log_prefix()
3438+
+ "torch::autograd::GraphRoot (NodeCall 0) with key size 39, previous key sizes=[]",
34243439
]
34253440
for expected in expected_logs:
34263441
self.assertTrue(expected in actual_logs)
@@ -3451,7 +3466,7 @@ def fn():
34513466
fn()
34523467

34533468
unexpected_logs = [
3454-
"Cache miss due to new autograd node: torch::autograd::GraphRoot (NodeCall 0)"
3469+
self.gen_cache_miss_log_prefix() + "torch::autograd::GraphRoot (NodeCall 0)"
34553470
]
34563471

34573472
self.assertEqual(sum(1 for e in unexpected_logs if e in logs.getvalue()), 0)

0 commit comments

Comments (0)