Skip to content

Commit 67b38f5

Browse files
committed
Supporting Velvet model
1 parent 9ab42dc commit 67b38f5

File tree

5 files changed

+32
-1
lines changed

5 files changed

+32
-1
lines changed

convert_hf_to_gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -699,6 +699,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
699699
if chkhsh == "b3f499bb4255f8ca19fccd664443283318f2fd2414d5e0b040fbdd0cc195d6c5":
700700
# ref: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
701701
res = "deepseek-r1-qwen"
702+
if chkhsh == "a3df2b8943e01cfd7d68c9f8446b294f3d8706d1d6853df65df7fda5d4fcb19f":
703+
# ref: https://huggingface.co/Almawave/Velvet-14B
704+
res = "velvet"
702705

703706
if res is None:
704707
logger.warning("\n")

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@ class TOKENIZER_TYPE(IntEnum):
109109
{"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
110110
{"name": "deepseek-v3", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"},
111111
{"name": "deepseek-r1-qwen", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"},
112+
{"name": "velvet", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Almawave/Velvet-14B"},
112113
]
113114

114115

include/llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,7 @@ extern "C" {
105105
LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
106106
LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
107107
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
108+
    LLAMA_VOCAB_PRE_TYPE_VELVET    = 29,
108109
};
109110

110111
enum llama_rope_type {

src/llama-chat.cpp

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
5858
{ "granite", LLM_CHAT_TEMPLATE_GRANITE },
5959
{ "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT },
6060
{ "megrez", LLM_CHAT_TEMPLATE_MEGREZ },
61+
{ "velvet", LLM_CHAT_TEMPLATE_VELVET },
6162
};
6263

6364
llm_chat_template llm_chat_template_from_str(const std::string & name) {
@@ -167,6 +168,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
167168
return LLM_CHAT_TEMPLATE_GIGACHAT;
168169
} else if (tmpl_contains("<|role_start|>")) {
169170
return LLM_CHAT_TEMPLATE_MEGREZ;
171+
} else if (tmpl_contains("<instruction>")) {
172+
return LLM_CHAT_TEMPLATE_VELVET;
170173
}
171174
return LLM_CHAT_TEMPLATE_UNKNOWN;
172175
}
@@ -566,10 +569,32 @@ int32_t llm_chat_apply_template(
566569
if (add_ass) {
567570
ss << "<|role_start|>assistant<|role_end|>";
568571
}
572+
} else if (tmpl == LLM_CHAT_TEMPLATE_VELVET) {
573+
// Velvet template
574+
std::string leading_space = "";
575+
std::string trailing_space = "";
576+
bool trim_assistant_message = true;
577+
bool is_inside_turn = false;
578+
for (auto message : chat) {
579+
if (!is_inside_turn) {
580+
ss << leading_space << "<instruction>" << trailing_space;
581+
is_inside_turn = true;
582+
}
583+
std::string role(message->role);
584+
std::string content(message->content);
585+
if (role == "system") {
586+
ss << content << "\n\n";
587+
} else if (role == "user") {
588+
ss << content << leading_space << "</instruction>";
589+
} else {
590+
ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "</s>";
591+
is_inside_turn = false;
592+
}
593+
}
569594
} else {
570595
// template not supported
571596
return -1;
572-
}
597+
}
573598
dest = ss.str();
574599
return dest.size();
575600
}

src/llama-chat.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ enum llm_chat_template {
3939
LLM_CHAT_TEMPLATE_GIGACHAT,
4040
LLM_CHAT_TEMPLATE_MEGREZ,
4141
LLM_CHAT_TEMPLATE_UNKNOWN,
42+
    LLM_CHAT_TEMPLATE_VELVET,
4243
};
4344

4445
struct llama_chat_message;

0 commit comments

Comments (0)