Skip to content

Commit 3ea3bc8

Browse files
Fix Wan model issues when the prompt length is long.
1 parent 8e69e2d commit 3ea3bc8

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

comfy/ldm/wan/model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -421,7 +421,7 @@ def forward_orig(
421421
e0 = self.time_projection(e).unflatten(1, (6, self.dim))
422422

423423
# context
424-
context = self.text_embedding(torch.cat([context, context.new_zeros(context.size(0), self.text_len - context.size(1), context.size(2))], dim=1))
424+
context = self.text_embedding(context)
425425

426426
if clip_fea is not None and self.img_emb is not None:
427427
context_clip = self.img_emb(clip_fea) # bs x 257 x dim

comfy/text_encoders/wan.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model
1111
class UMT5XXlTokenizer(sd1_clip.SDTokenizer):
1212
def __init__(self, embedding_directory=None, tokenizer_data={}):
1313
tokenizer = tokenizer_data.get("spiece_model", None)
14-
super().__init__(tokenizer, pad_with_end=False, embedding_size=4096, embedding_key='umt5xxl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=0)
14+
super().__init__(tokenizer, pad_with_end=False, embedding_size=4096, embedding_key='umt5xxl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=0)
1515

1616
def state_dict(self):
1717
return {"spiece_model": self.tokenizer.serialize_model()}

0 commit comments

Comments (0)