
Commit 1cd6cd6

Disable pytorch attention in VAE for AMD.
Parent: d7b4bf2

2 files changed, 6 insertions(+), 1 deletion(-)

2 files changed

+6
-1
lines changed

comfy/ldm/modules/diffusionmodules/model.py

Lines changed: 1 addition & 1 deletion
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
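
For reference, the "pytorch attention" selected here wraps torch's fused scaled_dot_product_attention, which is the API this commit stops using for the VAE on AMD. A minimal standalone call is sketched below; the tensor shapes are illustrative, not taken from the VAE code.

import torch
import torch.nn.functional as F

# (batch, heads, tokens, head_dim) - illustrative shapes only
q = torch.randn(1, 8, 64, 40)
k = torch.randn(1, 8, 64, 40)
v = torch.randn(1, 8, 64, 40)

# Fused attention kernel; this is the path disabled for AMD VAE decoding.
out = F.scaled_dot_product_attention(q, k, v)
print(out.shape)  # torch.Size([1, 8, 64, 40])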

comfy/model_management.py

Lines changed: 5 additions & 0 deletions
@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False  # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
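
The is_amd() helper called above is defined elsewhere in comfy/model_management.py and is not shown in this diff. A hypothetical sketch of such a check, assuming detection via PyTorch's ROCm build marker (the real implementation may differ):

import torch

def is_amd():
    # torch.version.hip is set on ROCm builds of PyTorch and is None on
    # CUDA/CPU builds, so it serves as a simple AMD GPU stack check.
    return torch.version.hip is not None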
