Skip to content

Commit 9553a7e

Browse files
Merge pull request #3818 from jwatzman/master
Reduce peak memory usage when changing models
2 parents 28e6d4a + b50ff4f commit 9553a7e

File tree

1 file changed: +7 additions, -4 deletions

modules/sd_models.py

Lines changed: 7 additions & 4 deletions
@@ -173,7 +173,9 @@ def load_model_weights(model, checkpoint_info):
         print(f"Global Step: {pl_sd['global_step']}")
 
     sd = get_state_dict_from_checkpoint(pl_sd)
-    missing, extra = model.load_state_dict(sd, strict=False)
+    del pl_sd
+    model.load_state_dict(sd, strict=False)
+    del sd
 
     if shared.cmd_opts.opt_channelslast:
         model.to(memory_format=torch.channels_last)
@@ -197,9 +199,10 @@ def load_model_weights(model, checkpoint_info):
 
         model.first_stage_model.to(devices.dtype_vae)
 
-        checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
-        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
-            checkpoints_loaded.popitem(last=False)  # LRU
+        if shared.opts.sd_checkpoint_cache > 0:
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+            while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+                checkpoints_loaded.popitem(last=False)  # LRU
     else:
         print(f"Loading weights [{sd_model_hash}] from cache")
         checkpoints_loaded.move_to_end(checkpoint_info)

Comments (0)