Skip to content

Commit a11dcbd

Browse files
authored
[SWDEV-563266] [SWDEV-563264] Fix some conflicts related to inductor UTs (#2796)
These changes appear to have been introduced by merge conflicts.
1 parent 29e3779 commit a11dcbd

File tree

2 files changed

+0
-10
lines changed

2 files changed

+0
-10
lines changed

test/inductor/test_kernel_benchmark.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -172,8 +172,6 @@ def f(a, b):
172172
max_autotune=True, max_autotune_gemm_backends="TRITON", shape_padding=False
173173
)
174174
@fresh_cache()
175-
@fresh_inductor_cache()
176-
@skipIfRocm #This seems to be disabled upstream https://github.com/pytorch/pytorch/issues/118346
177175
def test_mm_triton_kernel_benchmark(self):
178176
M = 2048
179177
N = 2432

test/inductor/test_loop_ordering.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -415,10 +415,6 @@ def f(x):
415415
self.assertEqual(1, metrics.generated_kernel_count)
416416

417417
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
418-
@skipIfRocm
419-
# Related PR: https://github.com/pytorch/pytorch/pull/149369
420-
# This test can't function for ROCm because fp8 'mul_cuda' op is not supported
421-
# in eager mode that is required here to check vs compiled results
422418
def test_fp8_cast_and_t(self):
423419
"""
424420
This test repros the not able to fuses issue in
@@ -441,10 +437,6 @@ def f(x, scale):
441437
self.assertEqual(1, metrics.generated_kernel_count)
442438

443439
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
444-
@skipIfRocm
445-
# Related PR: https://github.com/pytorch/pytorch/pull/149369
446-
# This test can't function for ROCm because fp8 'mul_cuda' op is not supported
447-
# in eager mode that is required here to check vs compiled results
448440
def test_fp8_pattern_2(self):
449441
"""
450442
This test repros the fp8 fusion relation issue here:

0 commit comments

Comments (0)