From 6c5637c7f69fd4d064bc6c43443dfb2e7796dc53 Mon Sep 17 00:00:00 2001
From: SahilCarterr <110806554+SahilCarterr@users.noreply.github.com>
Date: Tue, 16 Sep 2025 12:12:56 +0100
Subject: [PATCH 1/2] Fixes enable_xformers_memory_efficient_attention()

---
 src/diffusers/models/attention.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index c720b379551f..e48ddce1dbda 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -241,7 +241,10 @@ def set_use_memory_efficient_attention_xformers(
                 op_fw, op_bw = attention_op
                 dtype, *_ = op_fw.SUPPORTED_DTYPES
                 q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
-                _ = xops.memory_efficient_attention(q, q, q)
+                try:
+                    _ = xops.memory_efficient_attention(q, q, q)
+                except:
+                    _ = xops.ops.memory_efficient_attention(q, q, q)
             except Exception as e:
                 raise e

From ca45902f5d3fc12298dc5b6c0a99f853a941a182 Mon Sep 17 00:00:00 2001
From: SahilCarterr <110806554+SahilCarterr@users.noreply.github.com>
Date: Wed, 17 Sep 2025 13:29:18 +0100
Subject: [PATCH 2/2] Update attention.py

---
 src/diffusers/models/attention.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index e48ddce1dbda..22ead5a436f1 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -241,10 +241,7 @@ def set_use_memory_efficient_attention_xformers(
                 op_fw, op_bw = attention_op
                 dtype, *_ = op_fw.SUPPORTED_DTYPES
                 q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
-                try:
-                    _ = xops.memory_efficient_attention(q, q, q)
-                except:
-                    _ = xops.ops.memory_efficient_attention(q, q, q)
+                _ = xops.ops.memory_efficient_attention(q, q, q)
             except Exception as e:
                 raise e
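
Note on the fix: the final patch routes the probe through xops.ops.memory_efficient_attention, which suggests that xops in attention.py is bound to the top-level xformers package rather than to xformers.ops, so calling xops.memory_efficient_attention directly raises an AttributeError. The smoke test the series settles on can be reproduced standalone. A minimal sketch, assuming xformers is installed and a CUDA device is present; the hardcoded float16 dtype here stands in for the op_fw.SUPPORTED_DTYPES lookup in the real code:

    # Standalone version of the probe from the patch.
    # Assumes xformers is installed and a CUDA device is available.
    import torch
    import xformers.ops

    # Same probe tensor as in the diff: (batch, seq_len, embed_dim).
    # xformers accepts 3-D query/key/value and treats them as single-head.
    # float16 is an assumed stand-in for op_fw.SUPPORTED_DTYPES in the file.
    q = torch.randn((1, 2, 40), device="cuda", dtype=torch.float16)

    # Passing the same tensor as query, key, and value is enough to verify
    # that a memory-efficient attention kernel can actually run.
    _ = xformers.ops.memory_efficient_attention(q, q, q)
    print("xformers memory-efficient attention is usable")

If the call above raises, enable_xformers_memory_efficient_attention() would surface the same failure through the except branch that re-raises in set_use_memory_efficient_attention_xformers.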