Commit 4d01373: fix chat template

Parent: 971aa25

2 files changed (+2 lines, -10 lines)

examples/llava/tests.sh (1 addition, 1 deletion)

@@ -47,7 +47,7 @@ add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
 add_test "llama-mtmd-cli" "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "guinmoon/MobileVLM-3B-GGUF:Q4_K_M" "deepseek"
-add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M" "chatglm4"
+add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
 add_test "llama-mtmd-cli" "second-state/Llava-v1.5-7B-GGUF:Q2_K" "vicuna"
 add_test "llama-mtmd-cli" "cjpais/llava-1.6-mistral-7b-gguf:Q3_K" "vicuna"
 add_test "llama-mtmd-cli" "ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M"

src/llama-chat.cpp (1 addition, 9 deletions)

@@ -447,7 +447,7 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|assistant|>";
         }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
+    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4 || tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
         ss << "[gMASK]" << "<sop>";
         for (auto message : chat) {
             std::string role(message->role);
@@ -456,14 +456,6 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|assistant|>";
         }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n" << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
     } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
         for (auto message : chat) {
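
For context, here is a minimal standalone sketch of the prompt string the merged CHATGML_4 / GLMEDGE branch above builds. The struct and function names are placeholders chosen for illustration only, not real llama.cpp identifiers; the actual code constructs this string inside llm_chat_apply_template.

// Hypothetical, self-contained illustration of the formatting done by the
// merged CHATGML_4 / GLMEDGE branch; chat_msg and format_glm4_style are
// placeholder names, not llama.cpp types.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct chat_msg {
    std::string role;    // e.g. "system", "user", "assistant"
    std::string content;
};

// Mirrors the diff: a "[gMASK]<sop>" prefix, then "<|role|>\n<content>" for
// each message, then a trailing "<|assistant|>" when the caller wants the
// model to generate the next assistant turn.
static std::string format_glm4_style(const std::vector<chat_msg> & chat, bool add_ass) {
    std::ostringstream ss;
    ss << "[gMASK]" << "<sop>";
    for (const auto & message : chat) {
        ss << "<|" << message.role << "|>" << "\n" << message.content;
    }
    if (add_ass) {
        ss << "<|assistant|>";
    }
    return ss.str();
}

int main() {
    const std::vector<chat_msg> chat = {
        {"user", "Describe the image."},
    };
    // Prints: [gMASK]<sop><|user|>
    //         Describe the image.<|assistant|>
    std::cout << format_glm4_style(chat, /*add_ass=*/true) << "\n";
}

Judging from the removed GLMEDGE branch, the old code produced the same per-message formatting but without the leading "[gMASK]" << "<sop>", which is presumably why the two branches could be collapsed once GLM-Edge was switched to the GLM-4 prefix, and why the explicit "chatglm4" override was dropped from examples/llava/tests.sh.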
