@@ -118,7 +118,7 @@ def from_pretrained(path, **kwargs):
class Mistral3Tokenizer(sd1_clip.SDTokenizer):
    """Tokenizer for the Mistral 3 (24B) text encoder.

    The tekken tokenizer model blob is carried in
    ``tokenizer_data["tekken_model"]`` and round-tripped through
    ``state_dict`` so the tokenizer can be saved and reconstructed.
    """

    def __init__(self, embedding_directory=None, tokenizer_data=None):
        # Use a None sentinel instead of a shared mutable default dict;
        # behavior is unchanged when the argument is omitted.
        tokenizer_data = {} if tokenizer_data is None else tokenizer_data
        self.tekken_data = tokenizer_data.get("tekken_model", None)
        super().__init__(
            "",
            pad_with_end=False,
            embedding_directory=embedding_directory,
            embedding_size=5120,
            embedding_key='mistral3_24b',
            tokenizer_class=MistralTokenizerClass,
            has_end_token=False,
            pad_to_max_length=False,
            pad_token=11,
            start_token=1,
            max_length=99999999,
            min_length=1,
            pad_left=True,
            tokenizer_args=load_mistral_tokenizer(self.tekken_data),
            tokenizer_data=tokenizer_data,
        )

    def state_dict(self):
        # Persist the tekken model blob so from-state reconstruction works.
        return {"tekken_model": self.tekken_data}
@@ -176,12 +176,12 @@ def __init__(self, device="cpu", dtype=None, model_options={}):
class Qwen3Tokenizer(sd1_clip.SDTokenizer):
    """Tokenizer for the Qwen3-4B text encoder (reuses the qwen25_tokenizer vocab files)."""

    def __init__(self, embedding_directory=None, tokenizer_data=None):
        # Use a None sentinel instead of a shared mutable default dict;
        # behavior is unchanged when the argument is omitted.
        tokenizer_data = {} if tokenizer_data is None else tokenizer_data
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
        super().__init__(
            tokenizer_path,
            pad_with_end=False,
            embedding_directory=embedding_directory,
            embedding_size=2560,
            embedding_key='qwen3_4b',
            tokenizer_class=Qwen2Tokenizer,
            has_start_token=False,
            has_end_token=False,
            pad_to_max_length=False,
            max_length=99999999,
            min_length=512,
            pad_token=151643,
            tokenizer_data=tokenizer_data,
        )
180180
class Qwen3Tokenizer8B(sd1_clip.SDTokenizer):
    """Tokenizer for the Qwen3-8B text encoder (same vocab as the 4B variant, larger embedding size)."""

    def __init__(self, embedding_directory=None, tokenizer_data=None):
        # Use a None sentinel instead of a shared mutable default dict;
        # behavior is unchanged when the argument is omitted.
        tokenizer_data = {} if tokenizer_data is None else tokenizer_data
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
        super().__init__(
            tokenizer_path,
            pad_with_end=False,
            embedding_directory=embedding_directory,
            embedding_size=4096,
            embedding_key='qwen3_8b',
            tokenizer_class=Qwen2Tokenizer,
            has_start_token=False,
            has_end_token=False,
            pad_to_max_length=False,
            max_length=99999999,
            min_length=512,
            pad_token=151643,
            tokenizer_data=tokenizer_data,
        )
185185
186186class KleinTokenizer (sd1_clip .SD1Tokenizer ):
187187 def __init__ (self , embedding_directory = None , tokenizer_data = {}, name = "qwen3_4b" ):
0 commit comments