Skip to content

Commit cb1f9f2

Browse files
committed
cgraph init
1 parent 35591a9 commit cb1f9f2

File tree

5 files changed

+261
-0
lines changed

5 files changed

+261
-0
lines changed

convert_hf_to_gguf.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6463,6 +6463,17 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]:
64636463
def set_gguf_parameters(self):
    """Write the Hunyuan-MoE expert hyper-parameters into the GGUF header.

    GGUF stores a single scalar per expert setting, so the per-layer lists
    from the HF config must be uniform; this is enforced with asserts.
    """
    super().set_gguf_parameters()

    hparams = self.hparams

    # Number of routed experts per MoE layer.
    self.gguf_writer.add_expert_count(hparams["num_experts"])

    # The shared expert uses the dense FFN width from the HF config.
    self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])

    # Per-layer routed-expert FFN widths — must all agree before collapsing
    # to the single value GGUF can hold.
    expert_ffn_sizes = hparams["moe_intermediate_size"]
    assert all(size == expert_ffn_sizes[0] for size in expert_ffn_sizes)
    self.gguf_writer.add_expert_feed_forward_length(expert_ffn_sizes[0])

    # Per-layer top-k routing counts — likewise required to be uniform.
    topk_per_layer = hparams["moe_topk"]
    assert all(k == topk_per_layer[0] for k in topk_per_layer)
    self.gguf_writer.add_expert_used_count(topk_per_layer[0])
6476+
64666477
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
64676478
# process the experts separately
64686479
if name.find("mlp.experts") != -1:

src/llama-arch.cpp

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
7676
{ LLM_ARCH_BAILINGMOE, "bailingmoe" },
7777
{ LLM_ARCH_DOTS1, "dots1" },
7878
{ LLM_ARCH_ARCEE, "arcee" },
79+
{ LLM_ARCH_HUNYUAN_MOE, "hunyuan-moe" },
7980
{ LLM_ARCH_UNKNOWN, "(unknown)" },
8081
};
8182

@@ -1658,6 +1659,30 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
16581659
{ LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
16591660
}
16601661
},
1662+
{
1663+
LLM_ARCH_HUNYUAN_MOE,
1664+
{
1665+
{ LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1666+
{ LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1667+
{ LLM_TENSOR_OUTPUT, "output" },
1668+
{ LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1669+
{ LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1670+
{ LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1671+
{ LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1672+
{ LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1673+
{ LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1674+
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1675+
{ LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1676+
{ LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1677+
{ LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1678+
{ LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1679+
{ LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1680+
{ LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1681+
{ LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1682+
{ LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1683+
{ LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1684+
},
1685+
},
16611686
{
16621687
LLM_ARCH_UNKNOWN,
16631688
{

src/llama-arch.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ enum llm_arch {
8080
LLM_ARCH_BAILINGMOE,
8181
LLM_ARCH_DOTS1,
8282
LLM_ARCH_ARCEE,
83+
LLM_ARCH_HUNYUAN_MOE,
8384
LLM_ARCH_UNKNOWN,
8485
};
8586

src/llama-model.cpp

Lines changed: 223 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@ const char * llm_type_name(llm_type type) {
101101
case LLM_TYPE_57B_A14B: return "57B.A14B";
102102
case LLM_TYPE_17B_16E: return "17Bx16E (Scout)";
103103
case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)";
104+
case LLM_TYPE_A13B: return "A13B";
104105
case LLM_TYPE_30B_A3B: return "30B.A3B";
105106
case LLM_TYPE_235B_A22B: return "235B.A22B";
106107
case LLM_TYPE_E2B: return "E2B";
@@ -1504,6 +1505,18 @@ void llama_model::load_hparams(llama_model_loader & ml) {
15041505
default: type = LLM_TYPE_UNKNOWN;
15051506
}
15061507
} break;
1508+
case LLM_ARCH_HUNYUAN_MOE:
1509+
{
1510+
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1511+
1512+
hparams.n_ff_exp = hparams.n_ff(0);
1513+
hparams.n_ff_shexp = hparams.n_ff_exp;
1514+
1515+
switch (hparams.n_layer) {
1516+
case 32: type = LLM_TYPE_A13B; break;
1517+
default: type = LLM_TYPE_UNKNOWN;
1518+
}
1519+
} break;
15071520
default: throw std::runtime_error("unsupported model architecture");
15081521
}
15091522

@@ -4348,6 +4361,43 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
43484361
layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
43494362
}
43504363
} break;
4364+
case LLM_ARCH_HUNYUAN_MOE:
4365+
{
4366+
tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4367+
4368+
// output
4369+
output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4370+
output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4371+
// if output is NULL, init from the input tok embed
4372+
if (output == NULL) {
4373+
output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4374+
}
4375+
4376+
for (int i = 0; i < n_layer; ++i) {
4377+
auto & layer = layers[i];
4378+
4379+
layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4380+
4381+
layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4382+
layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
4383+
layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
4384+
layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4385+
4386+
layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
4387+
layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
4388+
4389+
layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4390+
4391+
layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4392+
layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
4393+
layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
4394+
layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
4395+
4396+
layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4397+
layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4398+
layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
4399+
}
4400+
} break;
43514401
default:
43524402
throw std::runtime_error("unknown architecture");
43534403
}
@@ -14260,6 +14310,174 @@ struct llm_build_arcee : public llm_graph_context {
1426014310
}
1426114311
};
1426214312

14313+
struct llm_build_hunyuan_moe : public llm_graph_context {
14314+
llm_build_hunyuan_moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
14315+
const int64_t n_embd_head = hparams.n_embd_head_v;
14316+
14317+
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
14318+
GGML_ASSERT(n_embd_head == hparams.n_rot);
14319+
14320+
ggml_tensor * cur;
14321+
ggml_tensor * inpL;
14322+
14323+
inpL = build_inp_embd(model.tok_embd);
14324+
14325+
// inp_pos - contains the positions
14326+
ggml_tensor * inp_pos = build_inp_pos();
14327+
14328+
auto * inp_attn = build_attn_inp_kv_unified();
14329+
14330+
const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
14331+
14332+
ggml_tensor * inp_out_ids = build_inp_out_ids();
14333+
14334+
for (int il = 0; il < n_layer; ++il) {
14335+
ggml_tensor * inpSA = inpL;
14336+
14337+
// norm
14338+
cur = build_norm(inpL,
14339+
model.layers[il].attn_norm, NULL,
14340+
LLM_NORM_RMS, il);
14341+
cb(cur, "attn_norm", il);
14342+
14343+
// self-attention
14344+
{
14345+
// rope freq factors for llama3; may return nullptr for llama2 and other models
14346+
ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
14347+
14348+
// compute Q and K and RoPE them
14349+
ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
14350+
cb(Qcur, "Qcur", il);
14351+
if (model.layers[il].bq) {
14352+
Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
14353+
cb(Qcur, "Qcur", il);
14354+
}
14355+
14356+
ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
14357+
cb(Kcur, "Kcur", il);
14358+
if (model.layers[il].bk) {
14359+
Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
14360+
cb(Kcur, "Kcur", il);
14361+
}
14362+
14363+
ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
14364+
cb(Vcur, "Vcur", il);
14365+
if (model.layers[il].bv) {
14366+
Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
14367+
cb(Vcur, "Vcur", il);
14368+
}
14369+
14370+
Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
14371+
Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
14372+
Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
14373+
14374+
Qcur = ggml_rope_ext(
14375+
ctx0, Qcur, inp_pos, rope_factors,
14376+
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
14377+
ext_factor, attn_factor, beta_fast, beta_slow
14378+
);
14379+
14380+
Kcur = ggml_rope_ext(
14381+
ctx0, Kcur, inp_pos, rope_factors,
14382+
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
14383+
ext_factor, attn_factor, beta_fast, beta_slow
14384+
);
14385+
14386+
if (model.layers[il].attn_k_norm) {
14387+
Kcur = build_norm(Kcur,
14388+
model.layers[il].attn_k_norm, model.layers[il].attn_k_norm_b,
14389+
LLM_NORM_RMS, il);
14390+
cb(Kcur, "Kcur_norm", il);
14391+
}
14392+
14393+
if (model.layers[il].attn_q_norm) {
14394+
Qcur = build_norm(Qcur,
14395+
model.layers[il].attn_q_norm, model.layers[il].attn_q_norm_b,
14396+
LLM_NORM_RMS, il);
14397+
cb(Qcur, "Qcur_norm", il);
14398+
}
14399+
14400+
cb(Qcur, "Qcur", il);
14401+
cb(Kcur, "Kcur", il);
14402+
cb(Vcur, "Vcur", il);
14403+
14404+
cur = build_attn(inp_attn, gf,
14405+
model.layers[il].wo, model.layers[il].bo,
14406+
Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
14407+
cb(cur, "attn_out", il);
14408+
}
14409+
14410+
if (il == n_layer - 1 && inp_out_ids) {
14411+
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
14412+
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
14413+
}
14414+
14415+
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
14416+
cb(ffn_inp, "ffn_inp", il);
14417+
14418+
ffn_inp = build_norm(ffn_inp,
14419+
model.layers[il].ffn_norm, NULL,
14420+
LLM_NORM_RMS, il);
14421+
cb(cur, "ffn_norm", il);
14422+
14423+
// feed-forward network (non-MoE)
14424+
ggml_tensor * cur_mlp = nullptr;
14425+
{
14426+
cur_mlp = build_ffn(ffn_inp,
14427+
model.layers[il].ffn_up_shexp, NULL, NULL,
14428+
model.layers[il].ffn_gate_shexp, NULL, NULL,
14429+
model.layers[il].ffn_down_shexp, NULL, NULL,
14430+
NULL,
14431+
LLM_FFN_SILU, LLM_FFN_PAR, il);
14432+
cb(cur_mlp, "ffn_out", il);
14433+
}
14434+
14435+
// MoE branch
14436+
ggml_tensor * cur_moe = nullptr;
14437+
{
14438+
cur_moe = build_moe_ffn(ffn_inp,
14439+
model.layers[il].ffn_gate_inp,
14440+
model.layers[il].ffn_up_exps,
14441+
model.layers[il].ffn_gate_exps,
14442+
model.layers[il].ffn_down_exps,
14443+
nullptr,
14444+
n_expert, n_expert_used,
14445+
LLM_FFN_SILU, true,
14446+
false, 0.0,
14447+
LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
14448+
il);
14449+
cb(cur_moe, "ffn_moe_out", il);
14450+
}
14451+
14452+
cur = ggml_add(ctx0, ggml_add(ctx0, cur_moe, cur_mlp), ffn_inp);
14453+
cb(cur, "ffn_out", il);
14454+
14455+
cur = build_cvec(cur, il);
14456+
cb(cur, "l_out", il);
14457+
14458+
// input for next layer
14459+
inpL = cur;
14460+
}
14461+
14462+
cur = inpL;
14463+
14464+
cur = build_norm(cur,
14465+
model.output_norm, NULL,
14466+
LLM_NORM_RMS, -1);
14467+
14468+
cb(cur, "result_norm", -1);
14469+
res->t_embd = cur;
14470+
14471+
// lm_head
14472+
cur = build_lora_mm(model.output, cur);
14473+
14474+
cb(cur, "result_output", -1);
14475+
res->t_logits = cur;
14476+
14477+
ggml_build_forward_expand(gf, cur);
14478+
}
14479+
};
14480+
1426314481
llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const {
1426414482
llama_memory_i * res;
1426514483

@@ -14635,6 +14853,10 @@ llm_graph_result_ptr llama_model::build_graph(
1463514853
{
1463614854
llm = std::make_unique<llm_build_arcee>(*this, params, gf);
1463714855
} break;
14856+
case LLM_ARCH_HUNYUAN_MOE:
14857+
{
14858+
llm = std::make_unique<llm_build_hunyuan_moe>(*this, params, gf);
14859+
} break;
1463814860
default:
1463914861
GGML_ABORT("fatal error");
1464014862
}
@@ -14786,6 +15008,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
1478615008
case LLM_ARCH_BAILINGMOE:
1478715009
case LLM_ARCH_NEO_BERT:
1478815010
case LLM_ARCH_ARCEE:
15011+
case LLM_ARCH_HUNYUAN_MOE:
1478915012
return LLAMA_ROPE_TYPE_NORM;
1479015013

1479115014
// the pairs of head values are offset by n_rot/2

src/llama-model.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ enum llm_type {
9393
LLM_TYPE_57B_A14B,
9494
LLM_TYPE_17B_16E, // llama4 Scout
9595
LLM_TYPE_17B_128E, // llama4 Maverick
96+
LLM_TYPE_A13B,
9697
LLM_TYPE_30B_A3B,
9798
LLM_TYPE_235B_A22B,
9899
LLM_TYPE_E2B,

0 commit comments

Comments
 (0)