From e74f03b05d88cf9c2f2b84b1c18f74ada5c0439b Mon Sep 17 00:00:00 2001
From: Jack Taylor
Date: Fri, 7 Nov 2025 11:21:43 +0000
Subject: [PATCH] Fix some conflicts related to inductor UTs

---
 test/inductor/test_kernel_benchmark.py | 2 --
 test/inductor/test_loop_ordering.py    | 8 --------
 2 files changed, 10 deletions(-)

diff --git a/test/inductor/test_kernel_benchmark.py b/test/inductor/test_kernel_benchmark.py
index 48fb77982d1e1..8c19327e75d35 100644
--- a/test/inductor/test_kernel_benchmark.py
+++ b/test/inductor/test_kernel_benchmark.py
@@ -172,8 +172,6 @@ def f(a, b):
         max_autotune=True, max_autotune_gemm_backends="TRITON", shape_padding=False
     )
     @fresh_cache()
-    @fresh_inductor_cache()
-    @skipIfRocm #This seems to be disabled upstream https://github.com/pytorch/pytorch/issues/118346
     def test_mm_triton_kernel_benchmark(self):
         M = 2048
         N = 2432
diff --git a/test/inductor/test_loop_ordering.py b/test/inductor/test_loop_ordering.py
index 04cfbb914f303..c77b3574b2227 100644
--- a/test/inductor/test_loop_ordering.py
+++ b/test/inductor/test_loop_ordering.py
@@ -415,10 +415,6 @@ def f(x):
         self.assertEqual(1, metrics.generated_kernel_count)
 
     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
-    @skipIfRocm
-    # Related PR: https://github.com/pytorch/pytorch/pull/149369
-    # This test can't function for ROCm because fp8 'mul_cuda' op is not supported
-    # in eager mode that is required here to check vs compiled results
     def test_fp8_cast_and_t(self):
         """
         This test repros the not able to fuses issue in
@@ -441,10 +437,6 @@ def f(x, scale):
         self.assertEqual(1, metrics.generated_kernel_count)
 
     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
-    @skipIfRocm
-    # Related PR: https://github.com/pytorch/pytorch/pull/149369
-    # This test can't function for ROCm because fp8 'mul_cuda' op is not supported
-    # in eager mode that is required here to check vs compiled results
     def test_fp8_pattern_2(self):
         """
         This test repros the fp8 fusion relation issue here: