2 changes: 0 additions & 2 deletions  test/inductor/test_kernel_benchmark.py

@@ -172,8 +172,6 @@ def f(a, b):
         max_autotune=True, max_autotune_gemm_backends="TRITON", shape_padding=False
     )
     @fresh_cache()
-    @fresh_inductor_cache()
-    @skipIfRocm  # This seems to be disabled upstream https://github.com/pytorch/pytorch/issues/118346
     def test_mm_triton_kernel_benchmark(self):
         M = 2048
         N = 2432
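
For context on what the re-enabled benchmark test exercises, here is a minimal sketch of compiling a matmul under the same inductor configuration the kept lines set up (max-autotune restricted to the Triton GEMM backend, no shape padding). M = 2048 and N = 2432 come from the test body; the K dimension, dtype, and the lambda are illustrative assumptions, and the real test additionally inspects the benchmark harness emitted alongside the generated kernel.

import torch
import torch._inductor.config as inductor_config

# Sketch only: mirror the config.patch arguments kept in the diff above.
# K = 64 and float16 are assumptions, not values taken from the test.
with inductor_config.patch(
    max_autotune=True, max_autotune_gemm_backends="TRITON", shape_padding=False
):
    a = torch.randn(2048, 64, device="cuda", dtype=torch.float16)
    b = torch.randn(64, 2432, device="cuda", dtype=torch.float16)
    compiled_mm = torch.compile(lambda x, y: x @ y)
    out = compiled_mm(a, b)  # triggers Triton GEMM autotuning on the first call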
8 changes: 0 additions & 8 deletions  test/inductor/test_loop_ordering.py

@@ -415,10 +415,6 @@ def f(x):
         self.assertEqual(1, metrics.generated_kernel_count)

     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
-    @skipIfRocm
-    # Related PR: https://github.com/pytorch/pytorch/pull/149369
-    # This test can't function for ROCm because fp8 'mul_cuda' op is not supported
-    # in eager mode that is required here to check vs compiled results
     def test_fp8_cast_and_t(self):
         """
         This test repros the not able to fuses issue in
@@ -441,10 +437,6 @@ def f(x, scale):
         self.assertEqual(1, metrics.generated_kernel_count)

     @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, "FP8 requires H100+ and MI300+")
-    @skipIfRocm
-    # Related PR: https://github.com/pytorch/pytorch/pull/149369
-    # This test can't function for ROCm because fp8 'mul_cuda' op is not supported
-    # in eager mode that is required here to check vs compiled results
     def test_fp8_pattern_2(self):
         """
         This test repros the fp8 fusion relation issue here:
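
The comments removed above explain why these FP8 tests were skipped on ROCm: they compare an eager reference run against a compiled run, and the fp8 mul needed for the eager side was previously unsupported there. As a rough, hypothetical illustration of that eager-vs-compiled check pattern (not the actual test bodies, and assuming FP8-capable hardware per the PLATFORM_SUPPORTS_FP8 guard):

import torch

def cast_to_fp8(x, scale):
    # Multiply in the original precision, then cast the result to an fp8 dtype.
    return (x * scale).to(torch.float8_e4m3fn)

x = torch.randn(64, 64, device="cuda", dtype=torch.bfloat16)
scale = torch.tensor(0.5, device="cuda", dtype=torch.bfloat16)

eager_out = cast_to_fp8(x, scale)                    # eager reference result
compiled_out = torch.compile(cast_to_fp8)(x, scale)  # inductor-compiled result

# Compare in a wider dtype, since elementwise kernels for fp8 are limited in eager mode.
torch.testing.assert_close(eager_out.float(), compiled_out.float())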