
Commit 5d2c042

support hunyuan_v1_dense
Signed-off-by: stevenkuang <[email protected]>
1 parent 86f5623 commit 5d2c042

File tree: 8 files changed, +361 -0 lines


convert_hf_to_gguf.py

Lines changed: 105 additions & 0 deletions
@@ -684,6 +684,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
            res = "hunyuan"
+        if chkhsh == "bba3b3366b646dbdded5dbc42d59598b849371afc42f7beafa914afaa5b70aa6":
+            # TODO: update ref
+            res = "hunyuan"
        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
            res = "falcon-h1"

@@ -7531,6 +7534,108 @@ def prepare_tensors(self):
            raise ValueError(f"Unprocessed experts: {experts}")


+@ModelBase.register("HunYuanDenseV1ForCausalLM")
+class HunYuanModel(TextModel):
+    model_arch = gguf.MODEL_ARCH.HUNYUAN_V1_DENSE
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # For handling tied embeddings
+        self._tok_embd = None
+
+    def set_vocab(self):
+        if (self.dir_model / "tokenizer.json").is_file():
+            self._set_vocab_gpt2()
+        else:
+            from transformers import AutoTokenizer
+            tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
+
+            # 1. Get the pre-tokenizer identifier hash
+            tokpre = self.get_vocab_base_pre(tokenizer)
+
+            # 2. Reverse-engineer the merges list from mergeable_ranks
+            merges = []
+            vocab = {}
+            mergeable_ranks = tokenizer.mergeable_ranks
+            for token, rank in mergeable_ranks.items():
+                vocab[QwenModel.token_bytes_to_string(token)] = rank
+                if len(token) == 1:
+                    continue
+                merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
+                if len(merged) == 2:
+                    merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
+
+            # 3. Generate the tokens and toktypes lists
+            vocab_size = self.hparams["vocab_size"]
+            assert tokenizer.vocab_size == vocab_size
+            special_tokens = tokenizer.special_tokens
+            reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
+            tokens: list[str] = []
+            toktypes: list[int] = []
+            for i in range(vocab_size):
+                if i not in reverse_vocab:
+                    tokens.append(f"[PAD{i}]")
+                    toktypes.append(gguf.TokenType.UNUSED)
+                else:
+                    token = reverse_vocab[i]
+                    tokens.append(token)
+                    if i in special_tokens.values():
+                        toktypes.append(gguf.TokenType.CONTROL)
+                    else:
+                        toktypes.append(gguf.TokenType.NORMAL)
+
+            # 4. Write all vocab-related fields to the GGUF writer
+            self.gguf_writer.add_tokenizer_model("gpt2")
+            self.gguf_writer.add_tokenizer_pre(tokpre)
+            self.gguf_writer.add_token_list(tokens)
+            self.gguf_writer.add_token_types(toktypes)
+            self.gguf_writer.add_token_merges(merges)
+
+            # 5. Add special tokens and chat templates
+            special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
+            special_vocab.add_to_gguf(self.gguf_writer)
+            # FIX for BOS token: overwrite incorrect id read from config.json
+            self.gguf_writer.add_bos_token_id(127958)  # <|bos|>
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+
+        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])
+
+        # Rope
+        rope_scaling = hparams.get("rope_scaling", {})
+        if rope_scaling.get("type") == "dynamic":
+            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
+            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
+            alpha = rope_scaling.get("alpha", 50)
+            base = hparams.get("rope_theta", 10000.0)
+            dim = hparams["head_dim"]
+            scaled_base = base * (alpha ** (dim / (dim - 2)))
+            self.gguf_writer.add_rope_freq_base(scaled_base)
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+            self.gguf_writer.add_rope_scaling_factor(1)
+            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
+            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length
+
+            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
+            assert alpha == 50 and base == 10000.0 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
+                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        if name == "model.embed_tokens.weight":
+            self._tok_embd = data_torch.clone()
+
+        if name == "lm_head.weight":
+            if self.hparams.get("tie_word_embeddings", False):
+                logger.info("Skipping tied output layer 'lm_head.weight'")
+                return []
+
+        return [(self.map_tensor_name(name), data_torch)]
+
@ModelBase.register("SmolLM3ForCausalLM")
class SmolLM3Model(LlamaModel):
    model_arch = gguf.MODEL_ARCH.SMOLLM3
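As a side note on the dynamic branch above: the NTK-aware alpha factor is folded directly into the RoPE frequency base rather than being kept as a separate scaling type. A minimal Python sketch of that arithmetic with the default values from the diff (the head_dim of 128 is an assumed value for illustration; the converter reads it from config.json):

# Sketch of the rope_freq_base computation in set_gguf_parameters above.
# alpha and base are the diff's defaults; dim = 128 is assumed for illustration.
alpha = 50.0
base = 10000.0
dim = 128

scaled_base = base * (alpha ** (dim / (dim - 2)))
print(round(scaled_base))  # roughly 532000, written to the GGUF as rope_freq_base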

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
@@ -140,6 +140,7 @@ class TOKENIZER_TYPE(IntEnum):
    {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"},
    {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"},
    {"name": "hunyuan", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tencent/Hunyuan-A13B-Instruct", "chkhsh": "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664"},
+    {"name": "hunyuan-v1-dense", "tokt": TOKENIZER_TYPE.BPE, "repo": "", "chkhsh": ""}, # TODO: update hunyuan-v1-dense repo
    # falcon-h1 series uses 4 different tokenizers across model sizes (0.5b - 34b), hence we need to define 4 different hashes
    {"name": "falcon-h1", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base", "chkhsh": "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6"},
    {"name": "falcon-h1", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/Falcon-H1-1B-Base", "chkhsh": "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86"},
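For reference, the chkhsh values registered in this table come from hashing the token ids a tokenizer produces for a fixed test string, so tokenizers with identical pre-tokenization behaviour hash to the same value. A rough sketch of the idea (the short test string is a placeholder for the long one the real script uses, and the snippet is an approximation of the update script, not a copy of it):

import hashlib
from transformers import AutoTokenizer

chktxt = "..."  # placeholder; the real script hashes the ids of a long, fixed test string

tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-A13B-Instruct", trust_remote_code=True)
chkhsh = hashlib.sha256(str(tokenizer.encode(chktxt)).encode()).hexdigest()
print(chkhsh)  # compared against the "chkhsh" entries above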

gguf-py/gguf/constants.py

Lines changed: 18 additions & 0 deletions
@@ -373,6 +373,7 @@ class MODEL_ARCH(IntEnum):
    ERNIE4_5 = auto()
    ERNIE4_5_MOE = auto()
    HUNYUAN_MOE = auto()
+    HUNYUAN_V1_DENSE = auto()
    SMOLLM3 = auto()
    LFM2 = auto()
    DREAM = auto()

@@ -692,6 +693,7 @@ class MODEL_TENSOR(IntEnum):
    MODEL_ARCH.ERNIE4_5_MOE: "ernie4_5-moe",
    MODEL_ARCH.FALCON_H1: "falcon-h1",
    MODEL_ARCH.HUNYUAN_MOE: "hunyuan-moe",
+    MODEL_ARCH.HUNYUAN_V1_DENSE: "hunyuan-v1-dense",
    MODEL_ARCH.SMOLLM3: "smollm3",
    MODEL_ARCH.LFM2: "lfm2",
    MODEL_ARCH.DREAM: "dream",

@@ -2449,6 +2451,22 @@ class MODEL_TENSOR(IntEnum):
        MODEL_TENSOR.FFN_DOWN_SHEXP,
        MODEL_TENSOR.FFN_UP_SHEXP,
    ],
+    MODEL_ARCH.HUNYUAN_V1_DENSE: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_Q_NORM,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_K_NORM,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
    MODEL_ARCH.SMOLLM3: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,

src/llama-arch.cpp

Lines changed: 21 additions & 0 deletions
@@ -85,6 +85,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_ERNIE4_5, "ernie4_5" },
    { LLM_ARCH_ERNIE4_5_MOE, "ernie4_5-moe" },
    { LLM_ARCH_HUNYUAN_MOE, "hunyuan-moe" },
+    { LLM_ARCH_HUNYUAN_V1_DENSE, "hunyuan-v1-dense" },
    { LLM_ARCH_SMOLLM3, "smollm3" },
    { LLM_ARCH_LFM2, "lfm2" },
    { LLM_ARCH_DREAM, "dream" },

@@ -1895,6 +1896,26 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
        },
    },
+    {
+        LLM_ARCH_HUNYUAN_V1_DENSE,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+
+        },
+    },
    {
        LLM_ARCH_SMOLLM3,
        {
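Note that the blk.%d entries above are per-layer name templates: for the first layer, for example, the query projection is looked up under a GGUF tensor name along the lines of blk.0.attn_q.weight.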

src/llama-arch.h

Lines changed: 1 addition & 0 deletions
@@ -89,6 +89,7 @@ enum llm_arch {
    LLM_ARCH_ERNIE4_5,
    LLM_ARCH_ERNIE4_5_MOE,
    LLM_ARCH_HUNYUAN_MOE,
+    LLM_ARCH_HUNYUAN_V1_DENSE,
    LLM_ARCH_SMOLLM3,
    LLM_ARCH_LFM2,
    LLM_ARCH_DREAM,

src/llama-chat.cpp

Lines changed: 26 additions & 0 deletions
@@ -66,6 +66,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
    { "llama4", LLM_CHAT_TEMPLATE_LLAMA4 },
    { "smolvlm", LLM_CHAT_TEMPLATE_SMOLVLM },
    { "hunyuan-moe", LLM_CHAT_TEMPLATE_HUNYUAN_MOE },
+    { "hunyuan-v1-dense", LLM_CHAT_TEMPLATE_HUNYUAN_V1_DENSE },
    { "kimi-k2", LLM_CHAT_TEMPLATE_KIMI_K2 },
};

@@ -193,6 +194,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
        return LLM_CHAT_TEMPLATE_DOTS1;
    } else if (tmpl_contains("<|startoftext|>") && tmpl_contains("<|extra_4|>")) {
        return LLM_CHAT_TEMPLATE_HUNYUAN_MOE;
+    } else if (tmpl_contains("<|hy_place▁holder▁no▁2|>") && tmpl_contains("<|hy_place▁holder▁no▁3|>")) {
+        return LLM_CHAT_TEMPLATE_HUNYUAN_V1_DENSE;
    } else if (tmpl_contains("<|im_assistant|>assistant<|im_middle|>")) {
        return LLM_CHAT_TEMPLATE_KIMI_K2;
    }

@@ -703,6 +706,29 @@ int32_t llm_chat_apply_template(
                ss << "<|startoftext|>" << message->content << "<|extra_0|>";
            }
        }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_HUNYUAN_V1_DENSE) {
+        // Todo: add model name
+        for (size_t i = 0; i < chat.size(); i++) {
+            std::string role(chat[i]->role);
+            if (i == 0) {
+                if (role == "system") {
+                    ss << "<|hy_begin▁of▁sentence|>" << chat[i]->content << "<|hy_place▁holder▁no▁3|>";
+                } else {
+                    ss << "<|hy_begin▁of▁sentence|>";
+                }
+            }
+
+            if (role == "assistant") {
+                ss << "<|hy_Assistant|>" << chat[i]->content << "<|hy_place▁holder▁no▁2|>";
+            } else if (role == "user") {
+                ss << "<|hy_User|>" << chat[i]->content;
+            }
+        }
+        if (add_ass) {
+            ss << "<|hy_Assistant|>";
+        } else {
+            ss << "<|hy_place▁holder▁no▁8|>";
+        }
    } else if (tmpl == LLM_CHAT_TEMPLATE_KIMI_K2) {
        // moonshotai/Kimi-K2-Instruct
        for (auto message : chat) {
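As a concrete illustration of the branch added above: for a conversation consisting of one system message and one user message, with add_ass enabled, the code renders a prompt of the form

<|hy_begin▁of▁sentence|>{system}<|hy_place▁holder▁no▁3|><|hy_User|>{user}<|hy_Assistant|>

and an assistant turn, once present, is emitted as <|hy_Assistant|>{content}<|hy_place▁holder▁no▁2|>.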

src/llama-chat.h

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ enum llm_chat_template {
    LLM_CHAT_TEMPLATE_SMOLVLM,
    LLM_CHAT_TEMPLATE_DOTS1,
    LLM_CHAT_TEMPLATE_HUNYUAN_MOE,
+    LLM_CHAT_TEMPLATE_HUNYUAN_V1_DENSE,
    LLM_CHAT_TEMPLATE_KIMI_K2,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};
