Skip to content

Commit 2d7c198

Browse files
entrpn and yiyixuxu authored
Update src/diffusers/models/attention_processor.py
Co-authored-by: YiYi Xu <[email protected]>
1 parent e3ce2ba commit 2d7c198

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -297,7 +297,7 @@ def __init__(
297297
self.set_processor(processor)
298298

299299
def set_use_xla_flash_attention(
300-
self, use_xla_flash_attention: bool, partition_spec: Optional[Tuple[Optional[str], ...]] = None, **kwargs
300+
self, use_xla_flash_attention: bool, partition_spec: Optional[Tuple[Optional[str], ...]] = None, is_flux = False,
301301
) -> None:
302302
r"""
303303
Set whether to use xla flash attention from `torch_xla` or not.

0 commit comments

Comments
 (0)