Commit 5becaad

fixed the chat template check condition
1 parent 1c754ba commit 5becaad

File tree

gguf-py/gguf/constants.py
src/llama-arch.cpp
src/llama-chat.cpp
src/llama-model.cpp

4 files changed: +5 −5 lines changed

gguf-py/gguf/constants.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -794,7 +794,7 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.APERTUS:     "apertus",
     MODEL_ARCH.MINIMAXM2:   "minimax-m2",
     MODEL_ARCH.COGVLM:      "cogvlm",
-    MODEL_ARCH.PANGU_EMBED: "pangu_embedded",
+    MODEL_ARCH.PANGU_EMBED: "pangu-embedded",
 }

 VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = {
```

src/llama-arch.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -107,7 +107,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_APERTUS,     "apertus"        },
     { LLM_ARCH_MINIMAX_M2,  "minimax-m2"     },
     { LLM_ARCH_COGVLM,      "cogvlm"         },
-    { LLM_ARCH_PANGU_EMBED, "pangu_embedded" },
+    { LLM_ARCH_PANGU_EMBED, "pangu-embedded" },
     { LLM_ARCH_UNKNOWN,     "(unknown)"      },
 };
```
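The underscore-to-hyphen rename has to land on both sides at once: the Python converter writes this string into the GGUF `general.architecture` metadata, and the C++ loader resolves it by reverse lookup over `LLM_ARCH_NAMES`, so any spelling mismatch degrades to `LLM_ARCH_UNKNOWN`. A minimal sketch of that lookup shape (simplified; `arch_from_string` here is a stand-in for llama.cpp's `llm_arch_from_string`, not the real loader code):

```cpp
// Sketch: why gguf-py and llama-arch.cpp must agree on the spelling.
// "pangu-embedded" is written into GGUF metadata by the converter and
// resolved by reverse lookup; a mismatch falls through to UNKNOWN.
#include <cassert>
#include <map>
#include <string>

enum llm_arch { LLM_ARCH_PANGU_EMBED, LLM_ARCH_UNKNOWN };

static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_PANGU_EMBED, "pangu-embedded" },
    { LLM_ARCH_UNKNOWN,     "(unknown)"      },
};

// Hypothetical stand-in for llama.cpp's llm_arch_from_string().
static llm_arch arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) {
        if (kv.second == name) {
            return kv.first;
        }
    }
    return LLM_ARCH_UNKNOWN;
}

int main() {
    assert(arch_from_string("pangu-embedded") == LLM_ARCH_PANGU_EMBED);
    // The old underscore spelling no longer resolves:
    assert(arch_from_string("pangu_embedded") == LLM_ARCH_UNKNOWN);
    return 0;
}
```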

src/llama-chat.cpp

Lines changed: 2 additions & 2 deletions

```diff
@@ -73,7 +73,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "kimi-k2",        LLM_CHAT_TEMPLATE_KIMI_K2     },
     { "seed_oss",       LLM_CHAT_TEMPLATE_SEED_OSS    },
     { "grok-2",         LLM_CHAT_TEMPLATE_GROK_2      },
-    { "pangu_embedded", LLM_CHAT_TEMPLATE_PANGU_EMBED },
+    { "pangu-embedded", LLM_CHAT_TEMPLATE_PANGU_EMBED },
 };

 llm_chat_template llm_chat_template_from_str(const std::string & name) {
@@ -214,7 +214,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_SEED_OSS;
     } else if (tmpl_contains("'Assistant: ' + message['content'] + '<|separator|>")) {
         return LLM_CHAT_TEMPLATE_GROK_2;
-    } else if (tmpl_contains("[unused9]") && tmpl_contains("message['content'] + '[unused10]'")) {
+    } else if (tmpl_contains(LU8("[unused9]系统:[unused10]")) && tmpl_contains("message['content'] + '[unused10]'")) {
         return LLM_CHAT_TEMPLATE_PANGU_EMBED;
     }
     return LLM_CHAT_TEMPLATE_UNKNOWN;
```
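Template detection in `llm_chat_detect_template` works by substring matching over the raw Jinja template text via the `tmpl_contains` helper, so each needle must be unique to its template family. The old check keyed on the bare `[unused9]` token, which other templates could plausibly contain; the fix keys on the full Pangu system-prompt marker instead, wrapped in `LU8()` to force a UTF-8 literal. A minimal self-contained sketch of the tightened check (the `LU8` definition below is a simplified stand-in for llama.cpp's macro, and `detect` abbreviates the real else-if chain):

```cpp
#include <cassert>
#include <string>

// UTF-8 literal helper mirroring llama.cpp's LU8() macro (simplified).
#define LU8(x) ((const char *)(u8##x))

enum llm_chat_template { LLM_CHAT_TEMPLATE_PANGU_EMBED, LLM_CHAT_TEMPLATE_UNKNOWN };

static llm_chat_template detect(const std::string & tmpl) {
    // Same idiom as llama-chat.cpp: scan the template text for needles.
    auto tmpl_contains = [&](const std::string & needle) {
        return tmpl.find(needle) != std::string::npos;
    };
    if (tmpl_contains(LU8("[unused9]系统:[unused10]")) &&
        tmpl_contains("message['content'] + '[unused10]'")) {
        return LLM_CHAT_TEMPLATE_PANGU_EMBED;
    }
    return LLM_CHAT_TEMPLATE_UNKNOWN;
}

int main() {
    // The generic marker alone no longer matches ...
    assert(detect("... [unused9] ...") == LLM_CHAT_TEMPLATE_UNKNOWN);
    // ... but the full system marker plus the content pattern does.
    assert(detect(std::string(LU8("[unused9]系统:[unused10]")) +
                  "message['content'] + '[unused10]'") == LLM_CHAT_TEMPLATE_PANGU_EMBED);
    return 0;
}
```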

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -6281,7 +6281,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

             // if output is NULL, init from the input tok embed
-            if(output == NULL){
+            if (output == NULL){
                 output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
             }
```
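The `if(` to `if (` change here is purely stylistic, but the surrounding logic is worth noting: `output.weight` is loaded with `TENSOR_NOT_REQUIRED`, and when the file omits it the loader duplicates the input token embedding as the output head (weight tying). A minimal sketch of that fallback pattern (the `load_optional_output` helper below is hypothetical, not the real `create_tensor` API):

```cpp
#include <cstdio>

struct tensor { const char * name; };

static tensor tok_embd = { "token_embd.weight" };

// Hypothetical stand-in for create_tensor() with TENSOR_NOT_REQUIRED:
// returns null when the file has no "output.weight" tensor.
static tensor * load_optional_output(bool present_in_file) {
    static tensor output = { "output.weight" };
    return present_in_file ? &output : nullptr;
}

int main() {
    tensor * output = load_optional_output(/*present_in_file=*/false);
    if (output == nullptr) {
        // Fall back to the input embedding, i.e. tied weights.
        output = &tok_embd;
    }
    std::printf("output head: %s\n", output->name);
    return 0;
}
```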
