1 parent 943b4a8 · commit 66a6811
src/diffusers/models/attention_dispatch.py
@@ -52,7 +52,6 @@
 _CAN_USE_NPU_ATTN = is_torch_npu_available()
 _CAN_USE_XLA_ATTN = is_torch_xla_available() and is_torch_xla_version(">=", _REQUIRED_XLA_VERSION)
 _CAN_USE_XFORMERS_ATTN = is_xformers_available() and is_xformers_version(">=", _REQUIRED_XFORMERS_VERSION)
-
 if _CAN_USE_FLASH_ATTN:
     from flash_attn import flash_attn_func, flash_attn_varlen_func
 else:
@@ -141,7 +140,6 @@ def wrap(func):
 
     _custom_op = custom_op_no_op
     _register_fake = register_fake_no_op
-
 logger = get_logger(__name__)  # pylint: disable=invalid-name
 
 # TODO(aryan): Add support for the following:
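
For readers without the full file in front of them, the hunks above touch two recurring patterns in `attention_dispatch.py`: capability flags such as `_CAN_USE_FLASH_ATTN` are computed once at import time and gate optional backend imports, while `custom_op_no_op` / `register_fake_no_op` stand in for `torch.library` registration when it is unavailable. The sketch below is a minimal illustration of both patterns, not the verbatim diffusers code; details the diff does not show (how the flags are computed, the exact fallback signature) are assumptions and are marked as such in the comments.

```python
import importlib.util

# Assumed flag computation: the real module uses diffusers' own availability
# and version helpers (e.g. is_torch_npu_available()), not a bare find_spec.
_CAN_USE_FLASH_ATTN = importlib.util.find_spec("flash_attn") is not None

if _CAN_USE_FLASH_ATTN:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
else:
    # Placeholders keep the module importable when the backend is missing;
    # dispatch code can check the flag (or the None value) before calling it.
    flash_attn_func = None
    flash_attn_varlen_func = None


def custom_op_no_op(name, fn=None, /, *, mutates_args=(), device_types=None, schema=None):
    # Assumed signature, mirroring torch.library.custom_op so the fallback can
    # be used as a drop-in decorator; it simply returns the function unchanged.
    def wrap(func):
        return func

    return wrap if fn is None else fn
```

The net effect of the placeholders is that importing the module never fails just because an optional attention backend is absent; only selecting that backend at dispatch time can.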