diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index c99133f257a5..3f59c8da8ea7 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -241,7 +241,7 @@ def set_use_memory_efficient_attention_xformers(
                     op_fw, op_bw = attention_op
                     dtype, *_ = op_fw.SUPPORTED_DTYPES
                 q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
-                _ = xops.memory_efficient_attention(q, q, q)
+                _ = xops.ops.memory_efficient_attention(q, q, q)
             except Exception as e:
                 raise e
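
For context on the change: the patched line is a smoke test that builds a tiny query tensor on CUDA and pushes it through memory-efficient attention once, so an incompatible xformers install fails loudly at configuration time rather than mid-forward-pass. memory_efficient_attention is exported from the xformers.ops submodule, so if xops aliases the top-level xformers package (as this hunk suggests), the fully qualified xops.ops.memory_efficient_attention path is required. Below is a minimal standalone sketch of the same check, assuming "import xformers as xops" and a CUDA-capable machine; the helper name check_xformers_attention is illustrative, not part of diffusers:

    import torch
    import xformers as xops  # assumed alias, mirroring the patched module
    import xformers.ops      # binds the submodule so xops.ops resolves


    def check_xformers_attention(attention_op=None):
        # If a specific (forward, backward) op pair was requested, probe the
        # forward op for a dtype it supports; otherwise leave dtype=None,
        # which makes torch.randn fall back to the default dtype.
        dtype = None
        if attention_op is not None:
            op_fw, _op_bw = attention_op
            dtype, *_ = op_fw.SUPPORTED_DTYPES
        # Tiny (batch, seq_len, dim) tensor: just enough to exercise the kernel.
        q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
        # Self-attention with q as query, key, and value; raises if this
        # xformers build cannot run memory-efficient attention on the device.
        _ = xops.ops.memory_efficient_attention(q, q, q)

Under these assumptions, calling check_xformers_attention() once during setup either returns silently or surfaces the underlying xformers/CUDA error, which is exactly the behavior the try/except in the patched method relies on.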