Commit 6a7bc35

Use the basic attention implementation for small inputs on old PyTorch.
Parent: b3b5ddb


comfy/ldm/modules/attention.py

Lines changed: 5 additions & 2 deletions
@@ -351,8 +351,11 @@ def attention_pytorch(q, k, v, heads, mask=None):
 optimized_attention_masked = optimized_attention

 def optimized_attention_for_device(device, mask=False, small_input=False):
-    if small_input and model_management.pytorch_attention_enabled():
-        return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
+    if small_input:
+        if model_management.pytorch_attention_enabled():
+            return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
+        else:
+            return attention_basic

     if device == torch.device("cpu"):
         return attention_sub_quad
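
For context: attention_pytorch wraps PyTorch's fused scaled-dot-product attention kernel, which is why it sits behind the model_management.pytorch_attention_enabled() gate, while attention_basic is the plain matmul-plus-softmax path that runs on any PyTorch version. Below is a minimal sketch of what such a basic, version-agnostic attention looks like; it is an illustration, not the repository's exact attention_basic, and the (batch, seq, heads * dim_head) tensor layout and additive-mask convention are assumptions.

import torch

def attention_basic_sketch(q, k, v, heads, mask=None):
    # Illustrative stand-in for a basic attention path: scaled dot-product
    # attention built from bmm + softmax, needing no fused kernel.
    # Layout assumption for this sketch: q, k, v are (b, seq, heads * dim_head).
    b, _, dim = q.shape
    dim_head = dim // heads

    def split_heads(t):
        # (b, seq, heads * dim_head) -> (b * heads, seq, dim_head)
        return t.reshape(b, -1, heads, dim_head).transpose(1, 2).reshape(b * heads, -1, dim_head)

    q, k, v = split_heads(q), split_heads(k), split_heads(v)
    scores = torch.bmm(q, k.transpose(-1, -2)) * (dim_head ** -0.5)
    if mask is not None:
        scores = scores + mask  # assumed broadcastable additive mask
    out = torch.bmm(scores.softmax(dim=-1), v)
    # (b * heads, seq_q, dim_head) -> (b, seq_q, heads * dim_head)
    return out.reshape(b, heads, -1, dim_head).transpose(1, 2).reshape(b, -1, dim)

With the change above, a caller that passes small_input=True on a PyTorch build without the fused kernel now gets this kind of naive path directly, instead of falling through to the sub-quadratic or split-attention choices meant for large inputs.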
