
Commit 32881f3

mmoskal and rkooo567 authored
[kernel] fix sliding window in prefix prefill Triton kernel (#4405)
Co-authored-by: SangBin Cho <[email protected]>
1 parent 5b8a7c1 commit 32881f3

File tree

6 files changed, +91 -23 lines changed


tests/kernels/test_prefix_prefill.py

Lines changed: 30 additions & 4 deletions
@@ -15,18 +15,21 @@
 CUDA_DEVICES = [
     f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
 ]
+SLIDING_WINDOW = [0, 16, 64, 128, 256, 512, 2048]
 
 
 @pytest.mark.parametrize("num_heads", NUM_HEADS)
 @pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV)
 @pytest.mark.parametrize("head_size", HEAD_SIZES)
 @pytest.mark.parametrize("dtype", DTYPES)
 @pytest.mark.parametrize("device", CUDA_DEVICES)
+@pytest.mark.parametrize("sliding_window", SLIDING_WINDOW)
 @torch.inference_mode()
 def test_contexted_kv_attention(
     num_heads: int,
     num_queries_per_kv: int,
     head_size: int,
+    sliding_window: int,
     dtype: torch.dtype,
     device: str,
 ) -> None:
@@ -123,12 +126,32 @@ def test_contexted_kv_attention(
 
     # Warm up the Triton kernel by calling it once before actually measuring
     # generation time
-    context_attention_fwd(query, k, v, output, k_cache, v_cache, block_table,
-                          b_start_loc, b_seq_len, b_ctx_len, max_input_len)
+    context_attention_fwd(query,
+                          k,
+                          v,
+                          output,
+                          k_cache,
+                          v_cache,
+                          block_table,
+                          b_start_loc,
+                          b_seq_len,
+                          b_ctx_len,
+                          max_input_len,
+                          sliding_window=sliding_window)
     torch.cuda.synchronize()
     start_time = time.time()
-    context_attention_fwd(query, k, v, output, k_cache, v_cache, block_table,
-                          b_start_loc, b_seq_len, b_ctx_len, max_input_len)
+    context_attention_fwd(query,
+                          k,
+                          v,
+                          output,
+                          k_cache,
+                          v_cache,
+                          block_table,
+                          b_start_loc,
+                          b_seq_len,
+                          b_ctx_len,
+                          max_input_len,
+                          sliding_window=sliding_window)
     torch.cuda.synchronize()
     end_time = time.time()
     print(f"triton Time: {(end_time - start_time)*1000:.2f} ms")
@@ -156,6 +179,9 @@ def test_contexted_kv_attention(
 
     attn_bias = BlockDiagonalCausalFromBottomRightMask.from_seqlens(
         subquery_lens, seq_lens)
+    if sliding_window > 0:
+        attn_bias = attn_bias.make_local_attention_from_bottomright(
+            sliding_window)
     output_ref = xops.memory_efficient_attention_forward(
         query,
         key,
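
The reference path in this test leans on xFormers' bottom-right-aligned block-diagonal masks. For intuition about what the combined causal-plus-local mask admits, here is a minimal standalone PyTorch sketch for a single sequence; the names seq_len, q_len and window are illustrative and not taken from the test:

import torch


def allowed_mask(seq_len: int, q_len: int, window: int) -> torch.Tensor:
    """Boolean [q_len, seq_len] mask, True where attention is allowed.

    Queries are bottom-right aligned: query row i sits at absolute position
    seq_len - q_len + i. A key at position j is visible iff it is causal
    (j <= query position) and inside the window (query position - j < window).
    """
    q_pos = torch.arange(seq_len - q_len, seq_len).unsqueeze(1)  # [q_len, 1]
    k_pos = torch.arange(seq_len).unsqueeze(0)                   # [1, seq_len]
    return (k_pos <= q_pos) & (q_pos - k_pos < window)


# Example: 4 new query tokens appended to 8 cached context tokens, window 6.
print(allowed_mask(seq_len=12, q_len=4, window=6).int())

The Triton kernel patched below enforces the same condition in two places: once in the loop over cached context keys (window only, since all context keys already precede the queries) and once in the loop of the query block against itself (causal mask plus window).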

vllm/attention/backends/flash_attn.py

Lines changed: 1 addition & 0 deletions
@@ -249,6 +249,7 @@ def forward(
                     prefill_meta.context_lens,
                     prefill_meta.max_subquery_len,
                     self.alibi_slopes,
+                    self.sliding_window[0],
                 )
         if decode_meta := attn_metadata.decode_metadata:
             # Decoding run.

vllm/attention/backends/rocm_flash_attn.py

Lines changed: 1 addition & 0 deletions
@@ -307,6 +307,7 @@ def forward(
                     prefill_meta.context_lens,
                     prefill_meta.max_subquery_len,
                     self.alibi_slopes,
+                    self.sliding_window[0],
                 )
 
         if decode_meta := attn_metadata.decode_metadata:

vllm/attention/backends/xformers.py

Lines changed: 1 addition & 0 deletions
@@ -246,6 +246,7 @@ def forward(
                     prefill_meta.context_lens,
                     prefill_meta.max_subquery_len,
                     self.alibi_slopes,
+                    self.sliding_window,
                 )
                 assert output[:num_prefill_tokens].shape == out.shape
                 output[:num_prefill_tokens] = out

vllm/attention/ops/paged_attn.py

Lines changed: 2 additions & 0 deletions
@@ -172,6 +172,7 @@ def forward_prefix(
         context_lens: torch.Tensor,
         max_subquery_len: int,
         alibi_slopes: Optional[torch.Tensor],
+        sliding_window: Optional[int],
     ) -> torch.Tensor:
         output = torch.empty_like(query)
         context_attention_fwd(
@@ -188,6 +189,7 @@ def forward_prefix(
             context_lens,
             max_subquery_len,
             alibi_slopes,
+            sliding_window,
         )
         return output
 
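
The backend changes above thread the window down to PagedAttention.forward_prefix in slightly different shapes: the flash-attn and ROCm backends index self.sliding_window[0], which suggests they keep a (left, right) pair internally, while the xFormers backend passes its self.sliding_window value directly. If you need to adapt another call site, a small normalization helper along these lines could keep the convention in one place; this is a hypothetical sketch under that assumption, not code from the commit:

from typing import Optional, Tuple, Union


def normalize_sliding_window(
        sliding_window: Union[None, int, Tuple[int, int]]) -> Optional[int]:
    """Reduce a backend's sliding-window setting to the single left-window
    integer that context_attention_fwd expects (None meaning disabled)."""
    if sliding_window is None:
        return None
    if isinstance(sliding_window, tuple):
        # e.g. a (left, right) pair; only the backward-looking size matters
        # for causal prefill.
        left = sliding_window[0]
        return left if left > 0 else None
    return sliding_window if sliding_window > 0 else None

Inside the kernel launch (last hunk of prefix_prefill.py below), a None window is mapped to SLIDING_WINDOW=0, which disables the extra masking.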

vllm/attention/ops/prefix_prefill.py

Lines changed: 56 additions & 19 deletions
@@ -50,6 +50,7 @@ def _fwd_kernel(
         BLOCK_DMODEL: tl.constexpr,  # head size
         BLOCK_DMODEL_PADDED: tl.constexpr,  # head size padded to a power of 2
         BLOCK_N: tl.constexpr,
+        SLIDING_WINDOW: tl.constexpr,
     ):
         cur_batch = tl.program_id(0)
         cur_head = tl.program_id(1)
@@ -62,42 +63,53 @@ def _fwd_kernel(
         cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
         cur_batch_query_len = cur_batch_seq_len - cur_batch_ctx_len
 
+        # start position inside of the query
+        # generally, N goes over kv, while M goes over query_len
         block_start_loc = BLOCK_M * start_m
 
         # initialize offsets
+        # [N]; starts at 0
         offs_n = tl.arange(0, BLOCK_N)
+        # [D]; starts at 0
         offs_d = tl.arange(0, BLOCK_DMODEL_PADDED)
+        # [M]; starts at current position in query
         offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+        # [M,D]
         off_q = (
             (cur_batch_in_all_start_index + offs_m[:, None]) * stride_qbs +
             cur_head * stride_qh + offs_d[None, :] * stride_qd)
 
         dim_mask = tl.where(
-            tl.arange(0, BLOCK_DMODEL_PADDED) < BLOCK_DMODEL, 1, 0).to(tl.int1)
+            tl.arange(0, BLOCK_DMODEL_PADDED) < BLOCK_DMODEL, 1,
+            0).to(tl.int1)  # [D]
 
         q = tl.load(Q + off_q,
                     mask=dim_mask[None, :] &
                     (offs_m[:, None] < cur_batch_query_len),
-                    other=0.0)
+                    other=0.0)  # [M,D]
 
-        # # initialize pointer to m and l
-        m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
-        l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
-        acc = tl.zeros([BLOCK_M, BLOCK_DMODEL_PADDED], dtype=tl.float32)
+        # initialize pointer to m and l
+        m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")  # [M]
+        l_i = tl.zeros([BLOCK_M], dtype=tl.float32)  # [M]
+        acc = tl.zeros([BLOCK_M, BLOCK_DMODEL_PADDED],
+                       dtype=tl.float32)  # [M,D]
 
+        # compute query against context (no causal mask here)
         for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
             start_n = tl.multiple_of(start_n, BLOCK_N)
             # -- compute qk ----
             bn = tl.load(B_Loc + cur_batch * stride_b_loc_b +
                          ((start_n + offs_n) // block_size) * stride_b_loc_s,
                          mask=(start_n + offs_n) < cur_batch_ctx_len,
-                         other=0)
+                         other=0)  # [N]
+            # [D,N]
             off_k = (bn[None, :] * stride_k_cache_bs +
                      cur_kv_head * stride_k_cache_h +
                      (offs_d[:, None] // x) * stride_k_cache_d +
                      ((start_n + offs_n[None, :]) % block_size) *
                      stride_k_cache_bl +
                      (offs_d[:, None] % x) * stride_k_cache_x)
+            # [N,D]
             off_v = (
                 bn[:, None] * stride_v_cache_bs +
                 cur_kv_head * stride_v_cache_h +
@@ -106,23 +118,39 @@ def _fwd_kernel(
             k = tl.load(K_cache + off_k,
                         mask=dim_mask[:, None] &
                         ((start_n + offs_n[None, :]) < cur_batch_ctx_len),
-                        other=0.0)
+                        other=0.0)  # [D,N]
 
-            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+            qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)  # [M,N]
             qk += tl.dot(q, k)
             qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk,
                           float("-inf"))
             qk *= sm_scale
+            if SLIDING_WINDOW > 0:
+                # (cur_batch_ctx_len + offs_m[:, None]) are the positions of
+                # Q entries in sequence
+                # (start_n + offs_n[None, :]) are the positions of
+                # KV entries in sequence
+                # So the condition makes sure each entry in Q only attends
+                # to KV entries not more than SLIDING_WINDOW away.
+                #
+                # We can't use -inf here, because the
+                # sliding window may lead to the entire row being masked.
+                # This then makes m_ij contain -inf, which causes NaNs in
+                # exp().
+                qk = tl.where((cur_batch_ctx_len + offs_m[:, None]) -
+                              (start_n + offs_n[None, :]) < SLIDING_WINDOW, qk,
+                              -10000)
 
             # -- compute m_ij, p, l_ij
-            m_ij = tl.max(qk, 1)
-            p = tl.exp(qk - m_ij[:, None])
-            l_ij = tl.sum(p, 1)
+            m_ij = tl.max(qk, 1)  # [M]
+            p = tl.exp(qk - m_ij[:, None])  # [M,N]
+            l_ij = tl.sum(p, 1)  # [M]
             # -- update m_i and l_i
-            m_i_new = tl.maximum(m_i, m_ij)
-            alpha = tl.exp(m_i - m_i_new)
-            beta = tl.exp(m_ij - m_i_new)
-            l_i_new = alpha * l_i + beta * l_ij
+            m_i_new = tl.maximum(m_i, m_ij)  # [M]
+            alpha = tl.exp(m_i - m_i_new)  # [M]
+            beta = tl.exp(m_ij - m_i_new)  # [M]
+            l_i_new = alpha * l_i + beta * l_ij  # [M]
+
             # -- update output accumulator --
             # scale p
             p_scale = beta / l_i_new
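
The comment block in the hunk above is the core of the fix. To see concretely why a fully masked row breaks the computation, here is a standalone PyTorch sketch of the same masking applied to a made-up score tile; the positions and the -10000 fill value mirror the kernel, everything else is illustrative:

import torch

# 4 query tokens appended after 8 cached context tokens, window of 2:
# with this geometry most query rows see no in-window key in this tile.
M, N, window, ctx_len = 4, 8, 2, 8
q_pos = ctx_len + torch.arange(M).unsqueeze(1)  # [M, 1] query positions
k_pos = torch.arange(N).unsqueeze(0)            # [1, N] context key positions
qk = torch.randn(M, N)

# Masking with -inf: a fully masked row has max -inf, and
# exp(-inf - (-inf)) = exp(nan), so the whole row turns into NaNs.
qk_inf = torch.where(q_pos - k_pos < window, qk,
                     torch.full_like(qk, float("-inf")))
print(torch.exp(qk_inf - qk_inf.max(dim=1, keepdim=True).values))

# Masking with a large finite negative value, as the kernel does: the row
# max stays finite, exp() is well defined, and the streaming max/sum
# rescaling of the outer loop later drives these entries to ~0 once a
# block that does contain in-window keys is processed.
qk_fin = torch.where(q_pos - k_pos < window, qk,
                     torch.full_like(qk, -10000.0))
print(torch.exp(qk_fin - qk_fin.max(dim=1, keepdim=True).values))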
@@ -134,7 +162,7 @@ def _fwd_kernel(
             v = tl.load(V_cache + off_v,
                         mask=dim_mask[None, :] &
                         ((start_n + offs_n[:, None]) < cur_batch_ctx_len),
-                        other=0.0)
+                        other=0.0)  # [N,D]
 
             p = p.to(v.dtype)
             acc += tl.dot(p, v)
@@ -149,8 +177,10 @@ def _fwd_kernel(
         k_ptrs = K + off_k
         v_ptrs = V + off_v
 
+        # block_mask is 0 when we're already past the current query length
         block_mask = tl.where(block_start_loc < cur_batch_query_len, 1, 0)
 
+        # compute query against itself (with causal mask)
         for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
             start_n = tl.multiple_of(start_n, BLOCK_N)
             # -- compute qk ----
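
The block_mask line above lets query blocks that lie entirely past the real query length skip the second loop: multiplying the upper bound by 0 yields an empty range, so the loop body never runs. A plain-Python sketch of the trick, with made-up block sizes and lengths:

BLOCK_M = BLOCK_N = 16
cur_batch_query_len = 20  # only the first two M-blocks hold real queries

for start_m in range(4):  # the launch grid may cover more blocks than needed
    block_start_loc = BLOCK_M * start_m
    block_mask = 1 if block_start_loc < cur_batch_query_len else 0
    upper = block_mask * (start_m + 1) * BLOCK_M
    print(start_m, list(range(0, upper, BLOCK_N)))
    # start_m=0 -> [0]        (causal: only the first key block)
    # start_m=1 -> [0, 16]
    # start_m=2, 3 -> []      (block past the query: loop body never runs)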
@@ -163,8 +193,13 @@ def _fwd_kernel(
             qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
             qk += tl.dot(q, k)
             qk *= sm_scale
+            # apply causal mask
             qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk,
                           float("-inf"))
+            if SLIDING_WINDOW > 0:
+                qk = tl.where(
+                    offs_m[:, None] -
+                    (start_n + offs_n[None, :]) < SLIDING_WINDOW, qk, -10000)
 
             # -- compute m_ij, p, l_ij
             m_ij = tl.max(qk, 1)
@@ -636,15 +671,16 @@ def context_attention_fwd(q,
                           b_seq_len,
                           b_ctx_len,
                           max_input_len,
-                          alibi_slopes=None):
+                          alibi_slopes=None,
+                          sliding_window=None):
 
     cap = torch.cuda.get_device_capability()
     BLOCK = 128 if cap[0] >= 8 else 64
     # shape constraints
     Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
     assert Lq == Lk and Lk == Lv
     # round up Lk to a power of 2 - this is required for Triton block size
-    Lk_padded = 2**((Lk - 1).bit_length())
+    Lk_padded = triton.next_power_of_2(Lk)
 
     sm_scale = 1.0 / (Lq**0.5)
     batch, head = b_seq_len.shape[0], q.shape[1]
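
Assuming triton.next_power_of_2 returns the smallest power of two greater than or equal to its argument (which is what its name suggests), the padding change in this hunk is behavior-preserving for positive head sizes, since the old bit-length expression computes the same value. A quick check of the old rule against an explicit reference, in plain Python so no Triton install is needed:

def old_padding(lk: int) -> int:
    return 2**((lk - 1).bit_length())


def next_pow2(n: int) -> int:
    # Smallest power of two >= n, written out explicitly.
    p = 1
    while p < n:
        p *= 2
    return p


for lk in (16, 32, 64, 80, 96, 112, 128, 256):
    assert old_padding(lk) == next_pow2(lk), lk
print("padding rule unchanged for common head sizes")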
@@ -749,6 +785,7 @@ def context_attention_fwd(q,
         BLOCK_DMODEL=Lk,
         BLOCK_DMODEL_PADDED=Lk_padded,
         BLOCK_N=BLOCK,
+        SLIDING_WINDOW=sliding_window if sliding_window is not None else 0,
         num_warps=num_warps,
         num_stages=1,
     )
