1 parent 867c2b3 commit 676b200
vllm/v1/attention/backends/rocm_aiter_fa.py
```diff
@@ -429,7 +429,6 @@ def __init__(
         attn_type: AttentionType = AttentionType.DECODER,
         kv_sharing_target_layer_name: Optional[int] = None,
         sinks: Optional[torch.Tensor] = None,
-        sinks: Optional[torch.Tensor] = None,
     ) -> None:
         self.num_heads = num_heads
         self.head_size = head_size
```
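For context, a minimal standalone sketch (not vLLM code) of why the removed line matters: Python rejects a function signature that repeats a parameter name, so the duplicated `sinks` argument would fail as soon as the module is compiled or imported.

```python
# Minimal sketch (not vLLM code): a duplicated parameter name in a
# function signature is rejected at compile time, which is what the
# removed `sinks` line in the diff above would have triggered.
src = (
    "def __init__(self, sinks=None, sinks=None):\n"
    "    pass\n"
)
try:
    compile(src, "<demo>", "exec")
except SyntaxError as exc:
    print(exc)  # duplicate argument 'sinks' in function definition
```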