Skip to content

Commit b643eae

Browse files
Make minimum_inference_memory() depend on --reserve-vram
1 parent baa6b4d commit b643eae

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

comfy/model_management.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -370,12 +370,9 @@ def offloaded_memory(loaded_models, device):
370370
offloaded_mem += m.model_offloaded_memory()
371371
return offloaded_mem
372372

373-
def minimum_inference_memory():
    """Return the baseline amount of VRAM (in bytes) to keep free for inference.

    Fixed at 1.2 GiB in this (pre-commit) version; not affected by any
    command-line reservation flags.
    """
    one_gib = 1024 * 1024 * 1024
    return one_gib * 1.2
375-
376-
# Extra VRAM (bytes) reserved on top of the inference baseline.
EXTRA_RESERVED_VRAM = 400 * 1024 * 1024
if any(platform.win32_ver()):
    # Windows is higher because of the shared vram issue
    EXTRA_RESERVED_VRAM = 600 * 1024 * 1024

# --reserve-vram is expressed in GiB and, when given, replaces the defaults above.
if args.reserve_vram is not None:
    EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
@@ -384,6 +381,9 @@ def minimum_inference_memory():
384381
def extra_reserved_memory():
    """Return the extra VRAM reservation (bytes) configured at module import time."""
    reserved = EXTRA_RESERVED_VRAM
    return reserved
386383

384+
def minimum_inference_memory():
    """Return the minimum free VRAM (bytes) required before running inference.

    Computed as a 0.8 GiB base plus whatever extra reservation was configured
    (platform default or the --reserve-vram override), so the floor tracks the
    user-requested reservation.
    """
    base_bytes = (1024 * 1024 * 1024) * 0.8
    return base_bytes + extra_reserved_memory()
386+
387387
def unload_model_clones(model, unload_weights_only=True, force_unload=True):
388388
to_unload = []
389389
for i in range(len(current_loaded_models)):

0 commit comments

Comments
 (0)