Skip to content

Commit 38b7ac6

Browse files
Don't init the CLIP model when the checkpoint has no CLIP weights.
1 parent 0c9bc19 commit 38b7ac6

File tree

1 file changed

+6
-3
lines changed

1 file changed

+6
-3
lines changed

comfy/sd.py

Lines changed: 6 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -470,10 +470,13 @@ class WeightsLoader(torch.nn.Module):
470470
w = WeightsLoader()
471471
clip_target = model_config.clip_target()
472472
if clip_target is not None:
473-
clip = CLIP(clip_target, embedding_directory=embedding_directory)
474-
w.cond_stage_model = clip.cond_stage_model
475473
sd = model_config.process_clip_state_dict(sd)
476-
load_model_weights(w, sd)
474+
if any(k.startswith('cond_stage_model.') for k in sd):
475+
clip = CLIP(clip_target, embedding_directory=embedding_directory)
476+
w.cond_stage_model = clip.cond_stage_model
477+
load_model_weights(w, sd)
478+
else:
479+
print("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")
477480

478481
left_over = sd.keys()
479482
if len(left_over) > 0:

0 commit comments

Comments (0)