We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent c6b9c11 · commit 1556468 (Copy full SHA for 1556468)
comfy/model_management.py
@@ -224,8 +224,11 @@ def is_nvidia():
224
torch.backends.cuda.enable_flash_sdp(True)
225
torch.backends.cuda.enable_mem_efficient_sdp(True)
226
227
-if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
228
- torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
+try:
+ if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
229
+ torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
230
+except:
231
+ logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp")
232
233
if args.lowvram:
234
set_vram_to = VRAMState.LOW_VRAM
0 commit comments