Skip to content

Commit 2d2411e

Browse files
authored
Update attention_processor.py
Refactor: pick an element from a set via unpacking instead of building a list
1 parent 36d34fd commit 2d2411e

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -402,7 +402,7 @@ def set_use_memory_efficient_attention_xformers(
402402
dtype = None
403403
if attention_op is not None:
404404
op_fw, op_bw = attention_op
405-
dtype = list(op_fw.SUPPORTED_DTYPES)[0]
405+
dtype, *_ = op_fw.SUPPORTED_DTYPES
406406
q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
407407
_ = xformers.ops.memory_efficient_attention(q, q, q)
408408
except Exception as e:

0 commit comments

Comments (0)