We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 87616f0 — commit bc90b0b (Copy full SHA for bc90b0b)
src/llama-chat.cpp
@@ -123,6 +123,9 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
123
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
124
return LLM_CHAT_TEMPLATE_PHI_3;
125
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
126
+ if (tmpl_contains("[gMASK]<sop>")) { /* new GLM4 0414 models */
127
+ return LLM_CHAT_TEMPLATE_CHATGML_4;
128
+ }
129
return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
130
} else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
131
return LLM_CHAT_TEMPLATE_GLMEDGE;
0 commit comments