Skip to content

Commit facac0f

Browse files
committed
Revert "add cache flush to flex att bs=16"
This reverts commit 19e276e.
1 parent 19e276e commit facac0f

File tree

1 file changed: +0 additions, -1 deletion

1 file changed

+0
-1
lines changed

benchmarks/triton_kernels_benchmark/flex_attention_benchmark_causal_mask.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,6 @@ def causal_mask(_, __, q_idx, kv_idx):
149149
))
150150
def benchmark(Z, H_q, H_kv, N_CTX_q, N_CTX_kv, D_HEAD_qk, D_HEAD_v, MODE, provider):
151151
# Maximum across torch=200, triton=600
152-
torch.xpu.empty_cache()
153152
do_bench = benchmark_suite.get_do_bench(n_warmup=600, n_repeat=10, quantiles=[0.5, 0.0, 1.0])
154153
if MODE not in ('fwd', 'bwd'):
155154
raise ValueError(f"Invalid MODE: {MODE}. Expected 'fwd' or 'bwd'.")

0 commit comments

Comments (0)