Skip to content

Commit 9264655

Browse files
committed
remove some params for now.
1 parent 2fa8e53 commit 9264655

File tree

1 file changed

+0
-6
lines changed

1 file changed

+0
-6
lines changed

src/diffusers/models/attention_dispatch.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -576,9 +576,6 @@ def _flash_attention(
576576
query: torch.Tensor,
577577
key: torch.Tensor,
578578
value: torch.Tensor,
579-
dropout_p: float = 0.0,
580-
scale: Optional[float] = None,
581-
is_causal: bool = False,
582579
window_size: Tuple[int, int] = (-1, -1),
583580
softcap: float = 0.0,
584581
alibi_slopes: Optional[torch.Tensor] = None,
@@ -589,9 +586,6 @@ def _flash_attention(
589586
q=query,
590587
k=key,
591588
v=value,
592-
dropout_p=dropout_p,
593-
softmax_scale=scale,
594-
causal=is_causal,
595589
window_size=window_size,
596590
softcap=softcap,
597591
alibi_slopes=alibi_slopes,

0 commit comments

Comments (0)