Skip to content

Commit 4bca736

Browse files
Don't try to use clip_fea on t2v model.
1 parent b6fefe6 commit 4bca736

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

comfy/ldm/wan/model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,8 @@ def __init__(self,
378378

379379
if model_type == 'i2v':
380380
self.img_emb = MLPProj(1280, dim, operation_settings=operation_settings)
381+
else:
382+
self.img_emb = None
381383

382384
def forward_orig(
383385
self,
@@ -421,7 +423,7 @@ def forward_orig(
421423
# context
422424
context = self.text_embedding(torch.cat([context, context.new_zeros(context.size(0), self.text_len - context.size(1), context.size(2))], dim=1))
423425

424-
if clip_fea is not None:
426+
if clip_fea is not None and self.img_emb is not None:
425427
context_clip = self.img_emb(clip_fea) # bs x 257 x dim
426428
context = torch.concat([context_clip, context], dim=1)
427429

0 commit comments

Comments (0)