Skip to content

Commit 2f56b6a

Browse files
committed
upd test
Signed-off-by: Avery Yingyi Huang <[email protected]>
1 parent 7327d0a commit 2f56b6a

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

tests/kernels/attention/test_flashinfer_trtllm_attention.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def test_flashinfer_trtllm_decode_with_baseline(
113113
kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
114114
kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)
115115

116-
workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
116+
workspace_buffer = torch.zeros(128 * 1024 * 1024, dtype=torch.int8)
117117
wrapper = flashinfer.BatchDecodeWithPagedKVCacheWrapper(
118118
workspace_buffer,
119119
kv_layout,
@@ -247,7 +247,7 @@ def test_flashinfer_trtllm_prefill_with_baseline(
247247
kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
248248
kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)
249249

250-
workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
250+
workspace_buffer = torch.zeros(128 * 1024 * 1024, dtype=torch.int8)
251251
wrapper = flashinfer.BatchPrefillWithPagedKVCacheWrapper(
252252
workspace_buffer, kv_layout)
253253
wrapper.plan(q_indptr,

0 commit comments

Comments (0)