@@ -3988,20 +3988,7 @@ def set_vocab(self):
         special_vocab._set_special_token(
             "bos", tokenizer.get_added_vocab()["<|endoftext|>"]
         )
-        special_vocab._set_special_token("eom", tokenizer.get_added_vocab()["<|observation|>"])  # 151338
-
-        # Fix chat template syntax error in GLM-4.5 models
-        if special_vocab.chat_template and isinstance(special_vocab.chat_template, str):
-            # Fix multiple syntax issues in GLM-4.5 chat template
-            template = special_vocab.chat_template
-            # Fix nested double quotes issue
-            template = template.replace('endswith("/nothink")', "endswith('/nothink')")
-            # Fix any other potential parentheses/tuple issues
-            template = template.replace(
-                "not visible_text(m.content).endswith('/nothink'))",
-                "not visible_text(m.content).endswith('/nothink')"
-            )
-            special_vocab.chat_template = template
+
         special_vocab.add_to_gguf(self.gguf_writer)

     def set_gguf_parameters(self):
@@ -4048,7 +4035,7 @@ def modify_tensors(
         name = name.replace("language_model.", "")  # for multimodal variants

         # Handle main token embedding (but not layer-specific NextN embeddings)
-        if name == "model.embed_tokens.weight" and ".layers." not in name:
+        if name == "model.embed_tokens.weight":
             return [(self.map_tensor_name("token_embd.weight"), data_torch)]

         # Handle routed experts