2 files changed: +16 −2

File 1 of 2:

@@ -19,12 +19,13 @@
 import comfy.ops
 ops = comfy.ops.disable_weight_init

+FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()

 def get_attn_precision(attn_precision):
     if args.dont_upcast_attention:
         return None
-    if attn_precision is None and args.force_upcast_attention:
-        return torch.float32
+    if FORCE_UPCAST_ATTENTION_DTYPE is not None:
+        return FORCE_UPCAST_ATTENTION_DTYPE
     return attn_precision

 def exists(val):
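
For reference, this hunk replaces a per-call check of args.force_upcast_attention with a module-level constant: force_upcast_attention_dtype() is evaluated once at import time, and every subsequent get_attn_precision() call just reads the cached result. A minimal self-contained sketch of that pattern (the force_flag parameter stands in for the args/model_management references, which are assumptions here, not the commit's actual wiring):

    import platform
    import torch

    def _force_upcast_attention_dtype(force_flag=False):
        # Upcast when explicitly requested, or on macOS 14.5,
        # where fp16 attention is known to produce black images.
        upcast = force_flag
        try:
            if platform.mac_ver()[0] in ['14.5']:
                upcast = True
        except Exception:
            pass
        return torch.float32 if upcast else None

    # Evaluated once at import, like FORCE_UPCAST_ATTENTION_DTYPE in the hunk above.
    FORCE_UPCAST_ATTENTION_DTYPE = _force_upcast_attention_dtype()

    def get_attn_precision(attn_precision=None):
        # The cached override wins; otherwise keep the caller's choice.
        if FORCE_UPCAST_ATTENTION_DTYPE is not None:
            return FORCE_UPCAST_ATTENTION_DTYPE
        return attn_precision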
File 2 of 2:

@@ -5,6 +5,7 @@
 import comfy.utils
 import torch
 import sys
+import platform

 class VRAMState(Enum):
     DISABLED = 0 #No vram present: no need to move models to vram
@@ -685,6 +686,18 @@ def pytorch_attention_flash_attention():
         return True
     return False

+def force_upcast_attention_dtype():
+    upcast = args.force_upcast_attention
+    try:
+        if platform.mac_ver()[0] in ['14.5']: #black image bug on OSX Sonoma 14.5
+            upcast = True
+    except:
+        pass
+    if upcast:
+        return torch.float32
+    else:
+        return None
+
 def get_free_memory(dev=None, torch_free_too=False):
     global directml_enabled
     if dev is None:
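
The platform probe uses the standard-library platform.mac_ver(), whose first tuple element is the macOS release string and an empty string on other operating systems, so the in ['14.5'] membership test is simply false everywhere but the affected release. A quick sketch of that behavior (the prefix variant is a hypothetical alternative, not part of the commit):

    import platform

    release = platform.mac_ver()[0]  # e.g. '14.5' on Sonoma 14.5, '' on non-Mac
    # Exact match, as in the commit; misses point releases like '14.5.1'.
    affected_exact = release in ['14.5']
    # Looser prefix test, if point releases ever needed covering too.
    affected_prefix = release == '14.5' or release.startswith('14.5.')
    print(release, affected_exact, affected_prefix)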