
Commit 3ab9748

Disable prompt weights on newbie te. (#11434)
1 parent: 0aa7fa4


2 files changed: +5 −3 lines


comfy/sd1_clip.py

Lines changed: 4 additions & 2 deletions
@@ -466,7 +466,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
     return embed_out
 
 class SDTokenizer:
-    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, tokenizer_data={}, tokenizer_args={}):
+    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
         if tokenizer_path is None:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
         self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
@@ -513,6 +513,8 @@ def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedd
         self.embedding_size = embedding_size
         self.embedding_key = embedding_key
 
+        self.disable_weights = disable_weights
+
     def _try_get_embedding(self, embedding_name:str):
         '''
         Takes a potential embedding name and tries to retrieve it.
@@ -547,7 +549,7 @@ def tokenize_with_weights(self, text:str, return_word_ids=False, tokenizer_optio
         min_padding = tokenizer_options.get("{}_min_padding".format(self.embedding_key), self.min_padding)
 
         text = escape_important(text)
-        if kwargs.get("disable_weights", False):
+        if kwargs.get("disable_weights", self.disable_weights):
             parsed_weights = [(text, 1.0)]
         else:
             parsed_weights = token_weights(text, 1.0)
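
For context, a minimal self-contained sketch of the branch this hunk changes. The token_weights stand-in below is hypothetical and only handles flat, unnested spans; ComfyUI's real parser in comfy/sd1_clip.py also handles nesting and escaped parentheses.

# Illustrative sketch, not ComfyUI code: shows the branch the commit changes.
# When weights are disabled, the whole prompt becomes one (text, 1.0) segment;
# otherwise "(span:1.2)" style groups are parsed into weighted segments.

import re

def token_weights(text, base_weight):
    # Hypothetical stand-in for comfy.sd1_clip.token_weights: split
    # "(span:w)" groups into (segment, weight) pairs, flat case only.
    out = []
    pos = 0
    for m in re.finditer(r"\(([^():]+):([0-9.]+)\)", text):
        if m.start() > pos:
            out.append((text[pos:m.start()], base_weight))
        out.append((m.group(1), float(m.group(2))))
        pos = m.end()
    if pos < len(text):
        out.append((text[pos:], base_weight))
    return out

def parse_prompt(text, disable_weights=False):
    if disable_weights:           # default now comes from the tokenizer instance
        return [(text, 1.0)]      # whole prompt kept literal, weight syntax and all
    return token_weights(text, 1.0)

print(parse_prompt("a photo of (a cat:1.2) outside"))
# -> [('a photo of ', 1.0), ('a cat', 1.2), (' outside', 1.0)]
print(parse_prompt("a photo of (a cat:1.2) outside", disable_weights=True))
# -> [('a photo of (a cat:1.2) outside', 1.0)]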

comfy/text_encoders/lumina2.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ def state_dict(self):
 class Gemma3_4BTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer = tokenizer_data.get("spiece_model", None)
-        super().__init__(tokenizer, pad_with_end=False, embedding_size=2560, embedding_key='gemma3_4b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}, tokenizer_data=tokenizer_data)
+        super().__init__(tokenizer, pad_with_end=False, embedding_size=2560, embedding_key='gemma3_4b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}, disable_weights=True, tokenizer_data=tokenizer_data)
 
     def state_dict(self):
         return {"spiece_model": self.tokenizer.serialize_model()}
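
One design detail worth noting: a per-call kwarg still wins, because the instance flag only replaces the hard-coded False default inside kwargs.get(). A small illustrative sketch of that precedence (the Tok class is hypothetical; the attribute names mirror the diff):

# Sketch of the precedence the change introduces, not ComfyUI code.
class Tok:
    def __init__(self, disable_weights=False):
        self.disable_weights = disable_weights

    def weights_disabled(self, **kwargs):
        # Before this commit the fallback was the literal False; now it is
        # the instance flag, so a tokenizer built with disable_weights=True
        # skips weight parsing unless a caller explicitly opts back in.
        return kwargs.get("disable_weights", self.disable_weights)

t = Tok(disable_weights=True)   # like Gemma3_4BTokenizer after the commit
print(t.weights_disabled())                        # True (instance default)
print(t.weights_disabled(disable_weights=False))   # False (caller override)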
