Skip to content

Commit 2cc227d

Browse files
int3 and Jokeren authored
[TESTING] Remove the fast_flush parameter from do_bench (#4485)
The parameter was introduced in triton-lang/triton#840, and it looks like it exists mainly to ease migration. In general there's no reason to use fast_flush=False, so let's remove it. --------- Co-authored-by: Keren Zhou <[email protected]>
1 parent c54f988 commit 2cc227d

File tree

1 file changed

+2
-8
lines changed

1 file changed

+2
-8
lines changed

python/triton/testing.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,7 @@ def do_bench_cudagraph(fn, rep=20, grad_to_none=None, quantiles=None, return_mod
9292
return _summarize_statistics(torch.tensor(ret), quantiles, return_mode)
9393

9494

95-
def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flush=True, return_mode="mean",
96-
device_type="cuda"):
95+
def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, return_mode="mean", device_type="cuda"):
9796
"""
9897
Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
9998
the 20-th and 80-th performance percentile.
@@ -108,8 +107,6 @@ def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flu
108107
:type grad_to_none: torch.tensor, optional
109108
:param quantiles: Performance percentile to return in addition to the median.
110109
:type quantiles: list[float], optional
111-
:param fast_flush: Use faster kernel to flush L2 cache between measurements
112-
:type fast_flush: bool, default is True
113110
:param return_mode: The statistical measure to return. Options are "min", "max", "mean", "median", or "all" Default is "mean". :type return_mode: str
114111
"""
115112
assert return_mode in ["min", "max", "mean", "median", "all"]
@@ -124,10 +121,7 @@ def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flu
124121
# before each kernel call to make sure that the L2 cache
125122
# doesn't contain any input data before the run
126123
cache_size = 256 * 1024 * 1024
127-
if fast_flush:
128-
cache = torch.empty(int(cache_size // 4), dtype=torch.int, device=device_type)
129-
else:
130-
cache = torch.empty(int(cache_size), dtype=torch.int8, device=device_type)
124+
cache = torch.empty(int(cache_size // 4), dtype=torch.int, device='cuda')
131125

132126
# Estimate the runtime of the function
133127
start_event = di.Event(enable_timing=True)

0 commit comments

Comments (0)