
Commit 0d5f3fc

remove 'kernel_name'

Signed-off-by: Anatoly Myachev <[email protected]>
1 parent 0b90c7e · commit 0d5f3fc

10 files changed: +17 -68 lines

benchmarks/triton_kernels_benchmark/benchmark_testing.py

Lines changed: 3 additions & 3 deletions

@@ -37,7 +37,7 @@ def _summarize_statistics(times, quantiles, return_mode):
 
 
 def do_bench_ipex(fn, n_warmup=25, n_repeat=100, grad_to_none=None, quantiles=None, return_mode="mean", device="xpu",
-                  sync_submitting=True, kernel_name=None):  # pylint: disable=unused-argument
+                  sync_submitting=True):
     """
     Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
     the 20-th and 80-th performance percentile.

@@ -108,7 +108,7 @@ def extract_kernels(funcs):
 
 
 def do_bench_elapsed_time(fn, n_warmup=25, n_repeat=100, grad_to_none=None, quantiles=None, return_mode="mean",
-                          device="xpu", kernel_name=None):  # pylint: disable=unused-argument
+                          device="xpu"):
     """
     Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
     the 20-th and 80-th performance percentile.

@@ -160,7 +160,7 @@ def do_bench_elapsed_time(fn, n_warmup=25, n_repeat=100, grad_to_none=None, quan
 
 
 def do_bench_upstream_pytorch_profiler(fn, n_warmup=25, n_repeat=100, grad_to_none=None, quantiles=None,
-                                       return_mode="mean", device="xpu", sync_submitting=True, kernel_name=None):  # pylint: disable=unused-argument
+                                       return_mode="mean", device="xpu", sync_submitting=True):
     """
     Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
     the 20-th and 80-th performance percentile.
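Caller-side, the change is mechanical: drop the `kernel_name` keyword from every `do_bench` call, as the remaining files in this commit do. A minimal before/after sketch (the import path, tensor shape, and quantiles are illustrative assumptions, not taken from this commit):

import torch
import triton_kernels_benchmark as benchmark_suit  # import path assumed

x = torch.randn(4096, 1024, device='xpu', dtype=torch.bfloat16)
fn = lambda: torch.softmax(x, dim=-1)

# Before this commit the keyword was accepted but unused
# (hence the pylint unused-argument suppressions in the signatures above):
# _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(
#     fn, n_warmup=10, n_repeat=10, quantiles=[0.5, 0.2, 0.8], kernel_name='softmax_kernel')

# After this commit, passing kernel_name raises a TypeError:
_, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(
    fn, n_warmup=10, n_repeat=10, quantiles=[0.5, 0.2, 0.8])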

benchmarks/triton_kernels_benchmark/flash_attention_fwd_benchmark.py

Lines changed: 2 additions & 4 deletions

@@ -256,8 +256,7 @@ def benchmark(Z, H, N_CTX, D_HEAD, CAUSAL, provider):
             ), attn_mask=None, dropout_p=0.0, is_causal=CAUSAL, scale=sm_scale).to(torch.float32)
         atol = 1e-1 if N_CTX == 16384 else 1e-2
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=atol, rtol=1e-3, err_msg='triton to torch')
-        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10, quantiles=quantiles,
-                                                              kernel_name='_attn_fwd')
+        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10, quantiles=quantiles)
 
     elif provider == 'xetla':
         module_name = f'flash_attn_causal_{CAUSAL}'.lower()

@@ -272,8 +271,7 @@ def benchmark(Z, H, N_CTX, D_HEAD, CAUSAL, provider):
         l = torch.empty((size_ml, ), device='xpu', dtype=torch.float)
 
         xetla_fn = lambda: func(q, k, v, out, dropout_mask, bias, m, l, Z, H, D_HEAD, N_CTX, N_CTX, sm_scale)
-        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(xetla_fn, n_warmup=10, n_repeat=10, quantiles=quantiles,
-                                                              kernel_name='gpu::xetla::fmha::FmhaForwardKernel<')
+        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(xetla_fn, n_warmup=10, n_repeat=10, quantiles=quantiles)
 
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/fused_softmax.py

Lines changed: 2 additions & 13 deletions

@@ -131,8 +131,7 @@ def benchmark(M, N, provider):
         triton_fn = lambda: softmax(x, out)
         torch_fn = lambda: torch.softmax(x, axis=-1)
         benchmark_suit.assert_close(triton_fn(), torch_fn(), err_msg="triton to torch")
-        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, quantiles=quantiles, n_warmup=10, n_repeat=10,
-                                                              kernel_name="softmax_kernel")
+        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, quantiles=quantiles, n_warmup=10, n_repeat=10)
 
     elif provider == "torch-jit":
         _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(lambda: naive_softmax(x), quantiles=quantiles,

@@ -145,17 +144,7 @@ def benchmark(M, N, provider):
         xetla_fn = lambda: func(x, out, 0)
         torch_fn = lambda: torch.softmax(x, axis=-1)
         # benchmark_suit.assert_close(xetla_fn(), torch_fn(), err_msg="xetla to torch")
-        kernels_name = {
-            "softmax_shape_4096_256": "mat1_4096x256_bf16_cfg0",
-            "softmax_shape_4096_1024": "mat1_4096x1024_bf16_cfg0",
-            "softmax_shape_4096_2048": "mat1_4096x2048_bf16_cfg0",
-            "softmax_shape_4096_4096": "mat1_4096x4096_bf16_cfg0",
-            "softmax_shape_4096_8192": "mat1_4096x8k_bf16_cfg0",
-            "softmax_shape_4096_16384": "mat1_4096x16k_bf16_cfg0",
-            "softmax_shape_4096_32768": "mat1_4096x32k_bf16_cfg0",
-        }
-        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(xetla_fn, quantiles=quantiles, n_warmup=10, n_repeat=10,
-                                                              kernel_name=kernels_name[name])
+        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(xetla_fn, quantiles=quantiles, n_warmup=10, n_repeat=10)
 
     else:
         raise NotImplementedError(f"Unsupported provider {provider}")

benchmarks/triton_kernels_benchmark/gemm_benchmark.py

Lines changed: 3 additions & 32 deletions

@@ -284,7 +284,7 @@ def benchmark(B, M, N, K, provider):
         # Legacy profiler shows ~6000TFLOPS GeoMean for onednn measurements, so use more reliable method
         do_bench = do_bench_elapsed_time
         _, min_ms, max_ms, mean_ms, cv = do_bench(lambda: torch.matmul(torch_a, torch_b), n_warmup=10, n_repeat=10,
-                                                  quantiles=quantiles, kernel_name='gemm_kernel')
+                                                  quantiles=quantiles)
     elif provider == 'triton':
         assert len(a.shape) == len(b.shape), 'Incompatible sizes'
         if len(a.shape) == 3:

@@ -297,8 +297,7 @@ def benchmark(B, M, N, K, provider):
         rtol = 1e-2 if a.dtype == torch.bfloat16 else 1e-3
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=rtol, err_msg='triton to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles,
-                                                                 kernel_name='matmul_kernel_with_block_pointers')
+                                                                 quantiles=quantiles)
     elif provider == 'xetla':
         if B == 1:
             c = torch.empty((M, N), device='xpu', dtype=torch.float32)

@@ -317,37 +316,9 @@ def benchmark(B, M, N, K, provider):
         xetla_fn = lambda: func(a, b, c, acc, cnt)
         torch_fn = lambda: torch.matmul(a, b).to(torch.float32)
 
-        kernels_name = {
-            'gemm_shape_1_1024_1024_1024': 'Test_1x1024x1024x1024_row_row',
-            'gemm_shape_1_2048_2048_2048': 'Test_1x2048x2048x2048_row_row',
-            'gemm_shape_1_4096_4096_4096': 'Test_1x4096x4096x4096_row_row',
-            'gemm_shape_1_8192_8192_8192': 'Test_1x8192x8192x8192_row_row',
-            'gemm_shape_1_1_5120_13824': 'Test_1x1x5120x13824_row_row',
-            'gemm_shape_1_4_4096_12288': 'Test_1x4x4096x12288_row_row',
-            'gemm_shape_1_512_8192_8192': 'Test_1x512x8192x8192_row_row',
-            'gemm_shape_1_512_8192_32768': 'Test_1x512x8192x32768_row_row',
-            'gemm_shape_1_512_32768_8192': 'Test_1x512x32768x8192_row_row',
-            'gemm_shape_1_1024_16384_8192': 'Test_1x1024x16384x8192_row_row',
-            'gemm_shape_1_1024_28672_8192': 'Test_1x1024x28672x8192_row_row',
-            'gemm_shape_1_3072_4096_3072': 'Test_1x3072x4096x3072_row_row',
-            'gemm_shape_1_4096_16384_8192': 'Test_1x4096x16384x8192_row_row',
-            'gemm_shape_1_8192_16384_1024': 'Test_1x8192x16384x1024_row_row',
-            'gemm_shape_1_8192_16384_4096': 'Test_1x8192x16384x4096_row_row',
-            'gemm_shape_1_16384_1024_8192': 'Test_1x16384x1024x8192_row_row',
-            'gemm_shape_1_16384_4096_8192': 'Test_1x16384x4096x8192_row_row',
-            'gemm_shape_1_16384_8192_1024': 'Test_1x16384x8192x1024_row_row',
-            'gemm_shape_1_16384_8192_4096': 'Test_1x16384x8192x4096_row_row',
-            'gemm_shape_4_32768_128_4096': 'Test_4x32768x128x4096_row_row',
-            'gemm_shape_4_32768_4096_128': 'Test_4x32768x4096x128_row_row',
-            'gemm_shape_32_4096_4096_128': 'Test_32x4096x4096x128_row_row',
-            'gemm_shape_4096_8_128_16384': 'Test_4096x8x128x16384_row_row',
-            'gemm_shape_4096_8_16384_128': 'Test_4096x8x16384x128_row_row',
-            'gemm_streamk_shape_3072_4096_3072': 'stream_k_gemm_run',
-        }
-
         # benchmark_suit.assert_close(xetla_fn(), torch_fn(), atol=1e-4, rtol=1.0, err_msg='xetla to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(xetla_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles, kernel_name=kernels_name[name])
+                                                                 quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py

Lines changed: 1 addition & 3 deletions

@@ -266,17 +266,15 @@ def benchmark(B, M, N, K, provider):
         assert len(a.shape) == len(b.shape), 'Incompatible sizes'
         if len(a.shape) == 3:
             c = torch.empty((B, M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers_batched'
         else:
             assert len(a.shape) == 2, 'Expecting shape of length 2'
             c = torch.empty((M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers'
         triton_fn = lambda: matmul(a, b, d, c)
         torch_fn = lambda: torch.matmul(a, b).to(torch.float32) + d
         rtol = 1e-2 if a.dtype == torch.bfloat16 else 1e-3
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=rtol, err_msg='triton to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles, kernel_name=kernel_name)
+                                                                 quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py

Lines changed: 1 addition & 3 deletions

@@ -268,17 +268,15 @@ def benchmark(B, M, N, K, provider):
         assert len(a.shape) == len(b.shape), 'Incompatible sizes'
         if len(a.shape) == 3:
             c = torch.empty((B, M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers_batched'
         else:
             assert len(a.shape) == 2, 'Expecting shape of length 2'
             c = torch.empty((M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers'
         triton_fn = lambda: matmul(a, b, c)
         torch_fn = lambda: torch.nn.functional.gelu(torch.matmul(a, b).to(torch.float32))
         rtol = 1e-2 if a.dtype == torch.bfloat16 else 1e-3
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=rtol, err_msg='triton to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles, kernel_name=kernel_name)
+                                                                 quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py

Lines changed: 1 addition & 3 deletions

@@ -256,17 +256,15 @@ def benchmark(B, M, N, K, provider):
         assert len(a.shape) == len(b.shape), 'Incompatible sizes'
         if len(a.shape) == 3:
             c = torch.empty((B, M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers_batched'
         else:
             assert len(a.shape) == 2, 'Expecting shape of length 2'
             c = torch.empty((M, N), device='xpu', dtype=torch.float32)
-            kernel_name = 'matmul_kernel_with_block_pointers'
         triton_fn = lambda: matmul(a, b, c)
         torch_fn = lambda: torch.matmul(torch.exp(a), b).to(torch.float32)
         rtol = 1e-2 if a.dtype == torch.bfloat16 else 1e-3
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=rtol, err_msg='triton to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles, kernel_name=kernel_name)
+                                                                 quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/gemm_splitk_benchmark.py

Lines changed: 1 addition & 2 deletions

@@ -156,8 +156,7 @@ def benchmark(M, N, K, provider):
         torch_fn = lambda: torch.matmul(a, b).to(torch.float32)
         rtol = 1e-2 if a.dtype == torch.bfloat16 else 1e-3
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=rtol, err_msg='triton to torch')
-        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10, quantiles=quantiles,
-                                                              kernel_name='_kernel')
+        _, min_ms, max_ms, mean, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10, quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py

Lines changed: 2 additions & 3 deletions

@@ -280,8 +280,7 @@ def benchmark(M, N, K, provider):
         torch_fn = lambda: torch.matmul(a, b).to(torch.float32)
         benchmark_suit.assert_close(triton_fn(), torch_fn(), atol=1e-4, rtol=1e-2, err_msg='triton to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles,
-                                                                 kernel_name=['first_wave', 'full_tiles'])
+                                                                 quantiles=quantiles)
     elif provider == 'xetla':
         c = torch.empty((M, N), device='xpu', dtype=torch.float32)
         acc = torch.empty((M, N), device='xpu', dtype=torch.float32)

@@ -294,7 +293,7 @@ def benchmark(M, N, K, provider):
 
         # benchmark_suit.assert_close(xetla_fn(), torch_fn(), atol=1e-4, rtol=1.0, err_msg='xetla to torch')
         _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(xetla_fn, n_warmup=10, n_repeat=10,
-                                                                 quantiles=quantiles, kernel_name='stream_k_gemm_run')
+                                                                 quantiles=quantiles)
     else:
         raise NotImplementedError(f'Unsupported provider {provider}')

benchmarks/triton_kernels_benchmark/prefix_sums.py

Lines changed: 1 addition & 2 deletions

@@ -44,8 +44,7 @@ def benchmark(M, N, AXIS, provider):
 
     if provider == 'triton':
         triton_fn = lambda: scan_kernel[(1, )](x, BLOCK_SIZE_M=M, BLOCK_SIZE_N=N, AXIS=AXIS)
-        _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, quantiles=quantiles,
-                                                                 kernel_name='scan_kernel')
+        _, min_ms, max_ms, mean_ms, cv = benchmark_suit.do_bench(triton_fn, quantiles=quantiles)
    else:
        raise NotImplementedError(f'Unsupported provider {provider}')
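With the argument gone, `do_bench` no longer carries any kernel-name hint; callers that still want per-kernel attribution can query the profiler themselves, which is what `do_bench_upstream_pytorch_profiler` wraps internally. A rough sketch, not part of this commit (the XPU profiler activity and the timing attribute are assumptions that vary across PyTorch builds):

import torch
from torch.profiler import profile, ProfilerActivity

x = torch.randn(4096, 4096, device='xpu')  # hypothetical workload

# ProfilerActivity.XPU assumes an XPU-enabled PyTorch build;
# CUDA builds would use ProfilerActivity.CUDA instead.
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.XPU]) as prof:
    torch.softmax(x, dim=-1)

# Match kernels by name manually, replacing the removed kernel_name argument.
for evt in prof.key_averages():
    if 'softmax' in evt.key:
        # Attribute name varies by version; older builds expose self_cuda_time_total.
        print(evt.key, evt.self_device_time_total)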
