From 4e0c059fbe32fbb24f61d4cb58f0fe1d5331bd96 Mon Sep 17 00:00:00 2001 From: matteo Date: Thu, 24 Apr 2025 17:35:32 +0200 Subject: [PATCH 1/6] fix wrong template in GLM4-0414 --- src/llama-chat.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 41f89e3a9d3bd..7fa46115ed16e 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -123,6 +123,9 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { + if (tmpl_contains("[gMASK]")) { /* new GLM4 0414 models */ + return LLM_CHAT_TEMPLATE_CHATGML_4; + } return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) { return LLM_CHAT_TEMPLATE_GLMEDGE; From 0cce5803cb0c6034bfa3bd629718c60bfaf6d711 Mon Sep 17 00:00:00 2001 From: matteo serva Date: Thu, 24 Apr 2025 18:32:15 +0200 Subject: [PATCH 2/6] fix spaces --- src/llama-chat.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 7fa46115ed16e..40a31399e9672 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -124,8 +124,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return LLM_CHAT_TEMPLATE_PHI_3; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { if (tmpl_contains("[gMASK]")) { /* new GLM4 0414 models */ - return LLM_CHAT_TEMPLATE_CHATGML_4; - } + return LLM_CHAT_TEMPLATE_CHATGML_4; + } return tmpl_contains("</s>") ? 
LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) { return LLM_CHAT_TEMPLATE_GLMEDGE; @@ -158,7 +158,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("[gMASK]sop")) { // chatglm3-6b return LLM_CHAT_TEMPLATE_CHATGML_3; - } else if (tmpl_contains("[gMASK]")) { + } else if (tmpl_contains("[gMASK]")) { /* old GLM4 models */ return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains(LU8("<用户>"))) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF From cc932928d0882293ad57c798ca137359c46c2d0f Mon Sep 17 00:00:00 2001 From: matteo serva Date: Thu, 24 Apr 2025 19:01:24 +0200 Subject: [PATCH 3/6] no bos token since it is already in the template --- convert_hf_to_gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index ea3a951b93753..d4fec408dd202 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -5154,7 +5154,7 @@ def set_vocab(self): special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"]) special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"]) - special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"]) + special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["<|endoftext|>"]) special_vocab.add_to_gguf(self.gguf_writer) def set_gguf_parameters(self): From 36f927fd552fd23a17e83883b10d05bf64e25751 Mon Sep 17 00:00:00 2001 From: matteo serva Date: Sun, 27 Apr 2025 19:58:48 +0200 Subject: [PATCH 4/6] moved the chatgml4 check to higher priority --- src/llama-chat.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 40a31399e9672..99e1492787867 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -122,10 +122,9 
@@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; + } else if (tmpl_contains("[gMASK]") && tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { /* GLM4 models */ + return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { - if (tmpl_contains("[gMASK]")) { /* new GLM4 0414 models */ - return LLM_CHAT_TEMPLATE_CHATGML_4; - } return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) { return LLM_CHAT_TEMPLATE_GLMEDGE; @@ -158,8 +157,6 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("[gMASK]sop")) { // chatglm3-6b return LLM_CHAT_TEMPLATE_CHATGML_3; - } else if (tmpl_contains("[gMASK]")) { /* old GLM4 models */ - return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains(LU8("<用户>"))) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF return LLM_CHAT_TEMPLATE_MINICPM; From f44e24b11c09a29bb5acc56084cb943f342bee7f Mon Sep 17 00:00:00 2001 From: matteo serva Date: Sun, 27 Apr 2025 20:39:32 +0200 Subject: [PATCH 5/6] restored template for old GLM models --- src/llama-chat.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 99e1492787867..5c70cc808f462 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -122,7 +122,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; - } else if (tmpl_contains("[gMASK]") && tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { /* GLM4 models */ + } else if (tmpl_contains("[gMASK]") && tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { /* GLM4 0414 models */ return 
LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; @@ -157,6 +157,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("[gMASK]sop")) { // chatglm3-6b return LLM_CHAT_TEMPLATE_CHATGML_3; + } else if (tmpl_contains("[gMASK]")) { + return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains(LU8("<用户>"))) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF return LLM_CHAT_TEMPLATE_MINICPM; From 0128f14608b48cf45b57422a47cb54f5357782d3 Mon Sep 17 00:00:00 2001 From: matteo serva Date: Sun, 27 Apr 2025 21:15:14 +0200 Subject: [PATCH 6/6] moved the GLM4 template check in the correct place with correct check --- src/llama-chat.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 5c70cc808f462..698c30ce49710 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -122,7 +122,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; - } else if (tmpl_contains("[gMASK]") && tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { /* GLM4 0414 models */ + } else if (tmpl_contains("[gMASK]")) { return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; @@ -157,8 +157,6 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("[gMASK]sop")) { // chatglm3-6b return LLM_CHAT_TEMPLATE_CHATGML_3; - } else if (tmpl_contains("[gMASK]")) { - return LLM_CHAT_TEMPLATE_CHATGML_4; } else if (tmpl_contains(LU8("<用户>"))) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF return LLM_CHAT_TEMPLATE_MINICPM;