Commit ca2e99c

Update modeling code, arch
1 parent a7dd86a commit ca2e99c

File tree: 8 files changed, +243 −28 lines

convert_hf_to_gguf.py

Lines changed: 43 additions & 25 deletions
@@ -2493,45 +2493,63 @@ def set_gguf_parameters(self):
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        # Handle expert weights - they're already merged in the HF format
-        if ".block_sparse_moe.experts.w1" in name:
-            assert bid is not None
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), data_torch)]
-        elif ".block_sparse_moe.experts.w2" in name:
-            assert bid is not None
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN_EXP, bid), data_torch)]
-        elif ".block_sparse_moe.experts.w3" in name:
+        # process the experts separately
+        if name.find("mlp.experts") != -1:
+            n_experts = self.hparams["num_experts"]
             assert bid is not None
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), data_torch)]
-
-        # Map dual normalization layers
-        if ".attn_norm_a." in name and bid is not None:
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_NORM, bid), data_torch)]
-        elif ".attn_norm_b." in name and bid is not None:
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_POST_NORM, bid), data_torch)]
-        elif ".ffn_norm_a." in name and bid is not None:
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_NORM, bid), data_torch)]
-        elif ".ffn_norm_b." in name and bid is not None:
-            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_POST_NORM, bid), data_torch)]
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
+
+                # merge the experts into a single 3d tensor
+                for w_name in ["gate_proj", "up_proj", "down_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename_to_retrieve = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename_to_retrieve])
+                        del self._experts[bid][ename_to_retrieve]
+
+                    data_torch = torch.stack(datas, dim=0)
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+                    new_name = self.map_tensor_name(merged_name)
+                    tensors.append((new_name, data_torch))
+
+                return tensors
+            else:
+                return []
 
         # Map attention gate
         elif ".self_attn.gate_proj." in name and bid is not None:
             return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_GATE, bid), data_torch)]
 
         # Map shared experts
-        elif ".block_sparse_moe.shared_experts.gate_proj." in name and bid is not None:
+        elif ".mlp.shared_experts.gate_proj." in name and bid is not None:
             return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), data_torch)]
-        elif ".block_sparse_moe.shared_experts.up_proj." in name and bid is not None:
+        elif ".mlp.shared_experts.up_proj." in name and bid is not None:
             return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), data_torch)]
-        elif ".block_sparse_moe.shared_experts.down_proj." in name and bid is not None:
+        elif ".mlp.shared_experts.down_proj." in name and bid is not None:
             return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN_SHEXP, bid), data_torch)]
 
+        # Pre FFN norm
+        elif ".pre_mlp_layernorm." in name and bid is not None:
+            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_PRE_NORM, bid), data_torch)]
+
+        # Post FFN norm
+        elif ".post_mlp_layernorm." in name and bid is not None:
+            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_POST_NORM, bid), data_torch)]
+
         # Map router
-        elif ".block_sparse_moe.router.gate." in name and bid is not None:
+        elif ".mlp.router.gate." in name and bid is not None:
            return [(self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_INP, bid), data_torch)]
 
-        # Skip expert_bias
-        elif "expert_bias" in name:
-            return []
+        if name.endswith(".expert_bias"):
+            name = name.replace(".expert_bias", ".expert_bias.bias")
 
         return [(self.map_tensor_name(name), data_torch)]
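The net effect of the hunk above: per-expert gate_proj/up_proj/down_proj matrices are buffered per layer and, once all num_experts * 3 tensors for a block have arrived, stacked into one 3-D tensor per projection. A minimal standalone sketch of that stacking step, with toy shapes and a hypothetical layer 0 rather than the converter's real state:

    # Toy illustration of the expert-merge step above; the expert count and
    # shapes are made up for demonstration, not taken from any real checkpoint.
    import torch

    n_experts, n_ff, n_embd = 4, 8, 16
    bid = 0  # hypothetical layer index

    # pretend these arrived one tensor at a time, as modify_tensors() sees them
    buffered = {
        f"model.layers.{bid}.mlp.experts.{xid}.gate_proj.weight": torch.randn(n_ff, n_embd)
        for xid in range(n_experts)
    }

    # stack the per-expert 2-D weights into a single 3-D tensor (expert dim first)
    datas = [buffered[f"model.layers.{bid}.mlp.experts.{xid}.gate_proj.weight"] for xid in range(n_experts)]
    merged = torch.stack(datas, dim=0)
    print(merged.shape)  # torch.Size([4, 8, 16]) -> written as blk.0.ffn_gate_exps in the GGUF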

gguf-py/gguf/constants.py

Lines changed: 3 additions & 2 deletions
@@ -2710,8 +2710,6 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.ATTN_Q_NORM,
         MODEL_TENSOR.ATTN_K_NORM,
         MODEL_TENSOR.ATTN_GATE,
-        MODEL_TENSOR.FFN_NORM,
-        MODEL_TENSOR.FFN_POST_NORM,
         MODEL_TENSOR.FFN_GATE,
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
@@ -2722,6 +2720,9 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_GATE_SHEXP,
         MODEL_TENSOR.FFN_UP_SHEXP,
         MODEL_TENSOR.FFN_DOWN_SHEXP,
+        MODEL_TENSOR.FFN_PRE_NORM,
+        MODEL_TENSOR.FFN_POST_NORM,
+        MODEL_TENSOR.FFN_EXP_PROBS_B,
     ],
     MODEL_ARCH.ERNIE4_5: [
         MODEL_TENSOR.TOKEN_EMBD,

gguf-py/gguf/tensor_mapping.py

Lines changed: 3 additions & 1 deletion
@@ -340,11 +340,12 @@ class TensorNameMap:
             "model.layers.{bid}.feedforward_layernorm",           # apertus
         ),
 
-        # Post feed-forward norm
+        # Pre feed-forward norm
         MODEL_TENSOR.FFN_PRE_NORM: (
             "model.layers.{bid}.pre_feedforward_layernorm",       # gemma2
             "layers.{bid}.pre_feedforward_layernorm",             # embeddinggemma
             "model.layers.{bid}.pre_ff_layernorm.weight",
+            "model.layers.{bid}.pre_mlp_layernorm",                # afmoe
         ),
 
         # Post feed-forward norm
@@ -380,6 +381,7 @@ class TensorNameMap:
             "model.layers.{bid}.mlp.gate.e_score_correction",         # deepseek-v3 dots1
             "model.layers.{bid}.mlp.moe_statics.e_score_correction",  # ernie4.5-moe
             "model.layers.{bid}.mlp.gate.expert_bias",                 # bailingmoe2
+            "model.layers.{bid}.mlp.expert_bias",                      # afmoe
             "model.layers.{bid}.feed_forward.expert_bias",             # lfm2moe
             "model.layers.{bid}.block_sparse_moe.e_score_correction",  # minimax-m2
         ),

src/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -35,6 +35,7 @@ add_library(llama
             unicode-data.cpp
             unicode.cpp
             unicode.h
+            models/afmoe.cpp
             models/apertus.cpp
             models/arcee.cpp
             models/arctic.cpp

src/llama-arch.cpp

Lines changed: 1 addition & 0 deletions
@@ -361,6 +361,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
             { LLM_TENSOR_FFN_GATE_SHEXP,    "blk.%d.ffn_gate_shexp" },
             { LLM_TENSOR_FFN_UP_SHEXP,      "blk.%d.ffn_up_shexp" },
             { LLM_TENSOR_FFN_DOWN_SHEXP,    "blk.%d.ffn_down_shexp" },
+            { LLM_TENSOR_FFN_EXP_PROBS_B,   "blk.%d.exp_probs_b" },
         },
     },
     {

src/llama-model.cpp

Lines changed: 1 addition & 0 deletions
@@ -5829,6 +5829,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) {
                         // MoE layers
                         layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
 
                         // grouped expert weights
                         layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
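The new ffn_exp_probs_b tensor is the per-layer expert-selection bias (exp_probs_b, shape {n_expert}). Assuming it is used in the same DeepSeek-V3 style as the other architectures that load this tensor, the bias shifts the routing scores used to pick the top-k experts while the unbiased scores still weight their outputs. A toy sketch of that idea, with invented values rather than the library's actual routing code:

    # Toy sketch of a DeepSeek-V3-style expert-selection bias; assumed usage,
    # with invented numbers, not llama.cpp's actual routing implementation.
    import torch

    n_expert, n_expert_used = 8, 2
    router_logits = torch.randn(n_expert)        # router output for one token
    probs = torch.sigmoid(router_logits)         # sigmoid gating
    exp_probs_b = torch.zeros(n_expert)          # the bias tensor loaded above (toy: zeros)

    selection = probs + exp_probs_b              # bias only influences which experts are picked
    top = torch.topk(selection, n_expert_used).indices
    weights = probs[top]                         # unbiased probs still weight the chosen experts
    print(top.tolist(), weights.tolist())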

src/models/afmoe.cpp

Lines changed: 187 additions & 0 deletions
@@ -0,0 +1,187 @@ (new file)
#include "models.h"

llm_build_afmoe::llm_build_afmoe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // MuP scaling: embeddings * sqrt(hidden_size)
    // mup_enabled = true, hidden_size = 1024, scale = 32.0
    inpL = ggml_scale(ctx0, inpL, sqrtf(float(n_embd)));
    cb(inpL, "inp_embd_scaled", -1);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();
    auto * inp_attn = build_attn_inp_kv();
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    const float kq_scale = 1.0f/sqrtf(float(n_embd_head));

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // dual attention normalization (pre)
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * attn_inp = cur; // save input for gate computation

            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);

            // compute gate from input
            ggml_tensor * gate = build_lora_mm(model.layers[il].wqkv_gate, attn_inp);
            cb(gate, "attn_gate_proj", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);

            // Q/K normalization
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);
            cb(Kcur, "Kcur_normed", il);

            // RoPE only for sliding_attention layers (every 4th layer is full_attention)
            // layer_types[i] = "sliding_attention" if (i+1) % global_attn_every_n_layers != 0
            bool is_sliding = ((il + 1) % 4) != 0; // global_attn_every_n_layers = 4
            if (is_sliding) {
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow);
                cb(Qcur, "Qcur_rope", il);

                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow);
                cb(Kcur, "Kcur_rope", il);
            }

            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            cur = build_attn(inp_attn,
                    NULL, NULL, // wo will be applied after gating
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);

            // attention gating: attn_out * sigmoid(gate) BEFORE o_proj
            gate = ggml_sigmoid(ctx0, gate);
            cb(gate, "attn_gate_sig", il);
            cur = ggml_mul(ctx0, cur, gate);
            cb(cur, "attn_gated", il);

            // now apply output projection
            cur = build_lora_mm(model.layers[il].wo, cur);
            cb(cur, "attn_o_proj", il);
        }

        // dual attention normalization (post)
        cur = build_norm(cur,
                model.layers[il].attn_post_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_post_norm", il);

        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // dual ffn normalization (pre)
        cur = build_norm(ffn_inp,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // MoE or dense FFN
        if ((uint32_t)il >= hparams.n_layer_dense_lead) {
            // MoE layer with sigmoid routing, normalization, and scaling
            ggml_tensor * moe_out = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    model.layers[il].ffn_exp_probs_b,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU,
                    hparams.expert_weights_norm != 0,     // norm_w (route_norm=True)
                    hparams.expert_weights_scale != 0.0f, // scale_w
                    hparams.expert_weights_scale,         // w_scale (route_scale=2.826)
                    (llama_expert_gating_func_type) hparams.expert_gating_func,
                    il);
            cb(moe_out, "ffn_moe_out", il);

            // shared expert
            if (hparams.n_expert_shared > 0) {
                ggml_tensor * ffn_shexp = build_ffn(cur,
                        model.layers[il].ffn_up_shexp,   NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            } else {
                cur = moe_out;
            }
        } else {
            // dense layer
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        }

        // dual ffn normalization (post)
        cur = build_norm(cur,
                model.layers[il].ffn_post_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_post_norm", il);

        cur = ggml_add(ctx0, cur, ffn_inp);
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);
    cb(cur, "result_norm", -1);

    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}
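Two details of the graph above that are easy to miss: the attention output is gated element-wise by sigmoid(gate_proj(x)) before the output projection, and RoPE is applied only on sliding-window layers (with the assumption hard-coded here that every 4th layer is a global-attention layer). A hedged PyTorch sketch of the gating math, using toy shapes and stand-in projections rather than the model's real weights:

    # Hedged sketch of the pre-o_proj attention gating above:
    #   out = o_proj(attn_out * sigmoid(gate_proj(x)))
    # The linear layers and shapes are toy stand-ins, not the model's real weights.
    import torch

    n_tokens, n_embd = 5, 16
    x = torch.randn(n_tokens, n_embd)            # normed attention input (attn_inp)
    attn_out = torch.randn(n_tokens, n_embd)     # stand-in for build_attn output, before wo

    gate_proj = torch.nn.Linear(n_embd, n_embd, bias=False)  # plays the role of wqkv_gate
    o_proj    = torch.nn.Linear(n_embd, n_embd, bias=False)  # plays the role of wo

    out = o_proj(attn_out * torch.sigmoid(gate_proj(x)))     # gate applied BEFORE o_proj

    # layer schedule assumed by the graph: every 4th layer skips RoPE (global attention)
    layer_types = ["global" if (il + 1) % 4 == 0 else "sliding" for il in range(8)]
    print(out.shape, layer_types)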

src/models/models.h

Lines changed: 4 additions & 0 deletions
@@ -57,6 +57,10 @@ struct llm_build_rwkv7_base : public llm_graph_context {
         int il) const;
 };
 
+struct llm_build_afmoe : public llm_graph_context {
+    llm_build_afmoe(const llama_model & model, const llm_graph_params & params);
+};
+
 struct llm_build_apertus : public llm_graph_context {
     llm_build_apertus(const llama_model & model, const llm_graph_params & params);
 };
