Skip to content

Commit f1dbffb

Browse files
committed
cleanup boolean logic
Signed-off-by: Sage Moore <[email protected]>
1 parent ae056e1 commit f1dbffb

File tree

1 file changed

+2
-3
lines changed

1 file changed

+2
-3
lines changed

vllm/attention/backends/mla/common.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1323,8 +1323,7 @@ def _compute_prefill_context(
13231323
[0, q.shape[-1] - v.shape[-1]],
13241324
value=0)
13251325

1326-
if is_hip and envs.VLLM_USE_TRITON_FLASH_ATTN and \
1327-
has_context is False:
1326+
if is_hip and envs.VLLM_USE_TRITON_FLASH_ATTN and not has_context:
13281327
attn_output, attn_softmax_lse = self.triton_fa_func(
13291328
q,
13301329
k,
@@ -1413,7 +1412,7 @@ def _forward_prefill(
14131412
v_padded = torch.nn.functional.pad(v, [0, q.shape[-1] - v.shape[-1]],
14141413
value=0)
14151414

1416-
if is_hip and envs.VLLM_USE_TRITON_FLASH_ATTN and has_context is False:
1415+
if is_hip and envs.VLLM_USE_TRITON_FLASH_ATTN and not has_context:
14171416
output = self.triton_fa_func(
14181417
q,
14191418
k,

0 commit comments

Comments (0)