
Commit 3e4cf98

Focus on attention and gemm
1 parent a11c0a6 commit 3e4cf98

File tree

2 files changed: +39 -39 lines changed


benchmarks/run.py

Lines changed: 37 additions & 37 deletions
@@ -28,53 +28,53 @@
 # Maps tritonbench op names to Helion kernel examples
 KERNEL_MAPPINGS: dict[str, tuple[str, str, str]] = {
     # <tritonbench_op_name>: (<tritonbench_module_path>, <helion_kernel_module_path>, <helion_kernel_function_name>)
-    "vector_add": ("tritonbench.operators.vector_add.operator", "examples.add", "add"),
-    "embedding": (
-        "tritonbench.operators.embedding.operator",
-        "examples.embedding",
-        "embedding_tritonbench",
-    ),
-    "vector_exp": (
-        "tritonbench.operators.vector_exp.operator",
-        "examples.exp",
-        "exp_tritonbench",
-    ),
-    "rms_norm": (
-        "tritonbench.operators.rms_norm.operator",
-        "examples.rms_norm",
-        "rms_norm_tritonbench",
-    ),
-    "sum": ("tritonbench.operators.sum.operator", "examples.sum", "sum_tritonbench"),
-    "softmax": (
-        "tritonbench.operators.softmax.operator",
-        "examples.softmax",
-        "softmax",
-    ),
-    "jagged_mean": (
-        "tritonbench.operators.jagged_mean.operator",
-        "examples.jagged_mean",
-        "jagged_mean_tritonbench",
-    ),
-    "fp8_gemm": (
-        "tritonbench.operators.fp8_gemm.fp8_gemm",
-        "examples.fp8_gemm",
-        "fp8_gemm_tritonbench",
-    ),
+    # "vector_add": ("tritonbench.operators.vector_add.operator", "examples.add", "add"),
+    # "embedding": (
+    #     "tritonbench.operators.embedding.operator",
+    #     "examples.embedding",
+    #     "embedding_tritonbench",
+    # ),
+    # "vector_exp": (
+    #     "tritonbench.operators.vector_exp.operator",
+    #     "examples.exp",
+    #     "exp_tritonbench",
+    # ),
+    # "rms_norm": (
+    #     "tritonbench.operators.rms_norm.operator",
+    #     "examples.rms_norm",
+    #     "rms_norm_tritonbench",
+    # ),
+    # "sum": ("tritonbench.operators.sum.operator", "examples.sum", "sum_tritonbench"),
+    # "softmax": (
+    #     "tritonbench.operators.softmax.operator",
+    #     "examples.softmax",
+    #     "softmax",
+    # ),
+    # "cross_entropy": (
+    #     "tritonbench.operators.cross_entropy.operator",
+    #     "examples.cross_entropy",
+    #     "cross_entropy",
+    # ),
+    # "jagged_mean": (
+    #     "tritonbench.operators.jagged_mean.operator",
+    #     "examples.jagged_mean",
+    #     "jagged_mean_tritonbench",
+    # ),
     "flash_attention": (
         "tritonbench.operators.flash_attention.operator",
         "examples.attention",
         "attention",
     ),
-    "cross_entropy": (
-        "tritonbench.operators.cross_entropy.operator",
-        "examples.cross_entropy",
-        "cross_entropy",
-    ),
     "fp8_attention": (
         "tritonbench.operators.fp8_attention.operator",
         "examples.fp8_attention",
         "fp8_attention_tritonbench",
     ),
+    "fp8_gemm": (
+        "tritonbench.operators.fp8_gemm.fp8_gemm",
+        "examples.fp8_gemm",
+        "fp8_gemm_tritonbench",
+    ),
 }
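Note: after this change, only the attention and GEMM benchmarks (flash_attention, fp8_attention, fp8_gemm) remain active in KERNEL_MAPPINGS; the other entries are commented out rather than deleted. As context for the mapping format documented in the dict's comment, the sketch below shows one way such an entry could be resolved at runtime via importlib. It is a minimal illustration, not the actual logic in benchmarks/run.py; resolve_kernel is a hypothetical helper name.

import importlib
from typing import Any, Callable

# Hypothetical helper (not part of benchmarks/run.py): resolves one
# KERNEL_MAPPINGS entry of the form
#   <op_name>: (<tritonbench_module_path>, <helion_kernel_module_path>, <helion_kernel_function_name>)
# into the imported tritonbench operator module and the Helion kernel callable.
def resolve_kernel(
    op_name: str, mappings: dict[str, tuple[str, str, str]]
) -> tuple[Any, Callable[..., Any]]:
    tritonbench_module, helion_module, helion_fn_name = mappings[op_name]
    operator_module = importlib.import_module(tritonbench_module)
    kernel_module = importlib.import_module(helion_module)
    kernel_fn = getattr(kernel_module, helion_fn_name)
    return operator_module, kernel_fn

# Example usage (assumes tritonbench and the Helion examples are importable):
# operator_module, attention_fn = resolve_kernel("flash_attention", KERNEL_MAPPINGS)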

helion/autotuner/base_search.py

Lines changed: 2 additions & 2 deletions
@@ -137,8 +137,8 @@ def benchmark_function(self, config: Config, fn: CompiledConfig) -> float:
         except CompilationError:
             self.log.debug("Benchmarking failed: Triton CompilationError")
         except Exception as e:
-            if not _expected_errors_regexp.search(str(e)) and not "exceeds triton maximum tensor numel" in str(e):
-                raise exc.TritonError(f"{type(e).__qualname__}: {e}", config) from e
+            # if not _expected_errors_regexp.search(str(e)) and not "exceeds triton maximum tensor numel" in str(e):
+            #     raise exc.TritonError(f"{type(e).__qualname__}: {e}", config) from e
             self.log.debug(f"Benchmarking failed: {type(e).__name__}: {e}")
             return inf
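For context on the autotuner change above: with the raise commented out, any exception thrown while benchmarking a candidate config is logged at debug level and the config is scored as infinitely slow, so the search continues instead of aborting. The snippet below is a minimal sketch of that fallback pattern under those assumptions, not the actual Helion code; benchmark_with_fallback, run_config, and log are placeholder names.

import logging
from math import inf

log = logging.getLogger(__name__)

# Sketch of the post-change behavior (placeholder names, not Helion's API):
# a candidate config that raises during benchmarking is treated as having an
# infinite runtime, so the autotuner simply moves on to the next config.
def benchmark_with_fallback(run_config: "Callable[[], float]") -> float:
    try:
        return run_config()  # hypothetical callable that times one config
    except Exception as e:  # intentionally broad, mirroring the diff above
        log.debug(f"Benchmarking failed: {type(e).__name__}: {e}")
        return inf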
