Diff summary: 2 files changed, +2 −10 lines.
@@ -47,7 +47,7 @@ add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
 add_test "llama-mtmd-cli" "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
 add_test "llama-mtmd-cli" "guinmoon/MobileVLM-3B-GGUF:Q4_K_M" "deepseek"
-add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M" "chatglm4"
+add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
 add_test "llama-mtmd-cli" "second-state/Llava-v1.5-7B-GGUF:Q2_K" "vicuna"
 add_test "llama-mtmd-cli" "cjpais/llava-1.6-mistral-7b-gguf:Q3_K" "vicuna"
 add_test "llama-mtmd-cli" "ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M"
@@ -447,7 +447,7 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|assistant|>";
         }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
+    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4 || tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
         ss << "[gMASK]" << "<sop>";
         for (auto message : chat) {
             std::string role(message->role);
@@ -456,14 +456,6 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|assistant|>";
         }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n" << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
     } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
         for (auto message : chat) {
You can’t perform that action at this time.
0 commit comments