Commit 349f7e5: Update hpu_attn.py
Parent: f15d53e

vllm/attention/backends/hpu_attn.py

Lines changed: 1 addition & 1 deletion

@@ -164,7 +164,7 @@ def __init__(
         MLACommonImpl.__init__(self, num_heads, head_size, scale, num_kv_heads,
                                alibi_slopes, sliding_window, kv_cache_dtype,
                                blocksparse_params, logits_soft_cap, attn_type,
-                               **kwargs)
+                               kv_sharing_target_layer_name, **kwargs)
         self.enable_fp8_attn = kv_cache_dtype == 'fp8_inc' and os.environ.get(
             'QUANT_CONFIG', None) is None
         self.matmul_qk = Matmul() if not self.enable_fp8_attn \
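
The one-line change forwards kv_sharing_target_layer_name into the base-class
constructor instead of dropping it. Below is a minimal sketch of the failure
mode and the fix, assuming (as the diff suggests) that MLACommonImpl.__init__
accepts kv_sharing_target_layer_name just before **kwargs; the signatures are
heavily simplified and the subclass name is illustrative, not the real vLLM
code:

# Simplified sketch; the real vLLM classes take many more parameters,
# and the subclass name (HPUMLAImpl) is illustrative.

class MLACommonImpl:
    def __init__(self, num_heads, head_size, scale,
                 kv_sharing_target_layer_name=None, **kwargs):
        # The base class accepts the target layer for KV-cache sharing.
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name


class HPUMLAImpl(MLACommonImpl):
    def __init__(self, num_heads, head_size, scale,
                 kv_sharing_target_layer_name=None, **kwargs):
        # Before the fix the argument was never forwarded:
        #   MLACommonImpl.__init__(self, num_heads, head_size, scale, **kwargs)
        # so the base class always saw its default (None).
        # After the fix it is passed through positionally:
        MLACommonImpl.__init__(self, num_heads, head_size, scale,
                               kv_sharing_target_layer_name, **kwargs)


# Usage: the value now reaches the base class (dummy layer name).
impl = HPUMLAImpl(8, 128, 0.125,
                  kv_sharing_target_layer_name="model.layers.0.attn")
assert impl.kv_sharing_target_layer_name == "model.layers.0.attn"

If the parameter were instead a required positional argument in the base
class, omitting it would raise a TypeError at construction time, which is the
other common way this kind of subclass/base signature drift surfaces.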
