Skip to content

Commit 9126995

Browse files
Print memory summary on OOM to help with debugging.
1 parent 9a552df commit 9126995

File tree

2 files changed

+5
-0
lines changed

2 files changed

+5
-0
lines changed

comfy/model_management.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1542,6 +1542,10 @@ def soft_empty_cache(force=False):
15421542
def unload_all_models():
15431543
free_memory(1e30, get_torch_device())
15441544

1545+
def debug_memory_summary():
    if is_amd() or is_nvidia():
        return torch.cuda.memory.memory_summary()
    return ""
15451549

15461550
#TODO: might be cleaner to put this somewhere else
15471551
import threading

execution.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -601,6 +601,7 @@ async def await_completion():
601601

602602
if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
    tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
    logging.info("Memory summary: {}".format(comfy.model_management.debug_memory_summary()))
    logging.error("Got an OOM, unloading all loaded models.")
    comfy.model_management.unload_all_models()
606607

0 commit comments

Comments (0)