
Commit c81de6e

Fix glm4moe bug (#15088)
1 parent: 22f060c

1 file changed: 0 additions, 4 deletions


src/llama-model.cpp

Lines changed: 0 additions & 4 deletions
@@ -13800,10 +13800,6 @@ struct llm_build_glm4_moe : public llm_graph_context {
                 LLM_FFN_SILU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         } else {
-            // MoE layer with shared experts
-            const int64_t n_expert = hparams.n_expert;
-            const int64_t n_expert_used = hparams.n_expert_used;
-
             // Process routed experts using existing MoE infrastructure
             ggml_tensor * routed_out = build_moe_ffn(cur,
                 model.layers[il].ffn_gate_inp,
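For context, one plausible reason removing these declarations matters (an assumption, not stated in the commit message): the deleted const int64_t lines re-read n_expert and n_expert_used from hparams inside the else branch, shadowing whatever the enclosing graph-builder scope already provides, so the MoE branch can silently use values that differ from the outer ones. The sketch below is a minimal, self-contained illustration of that shadowing hazard; the struct and field names are invented for the example and this is not the llama.cpp source.

// Hypothetical sketch of the shadowing hazard; not the llama.cpp source.
#include <cstdint>
#include <cstdio>

struct hparams_t {
    int64_t n_expert      = 128;
    int64_t n_expert_used = 8;
};

struct graph_ctx_t {
    hparams_t hparams;
    // Outer-scope copy that other code may have adjusted (for example, widened
    // so that every expert is exercised once during a warmup pass).
    int64_t n_expert_used = 128;

    void build_moe_layer() const {
        // Re-reading the value from hparams here shadows the adjusted member
        // above, so this branch silently routes over 8 experts, not 128.
        const int64_t n_expert_used = hparams.n_expert_used;
        std::printf("routing over %lld experts\n", (long long) n_expert_used);
    }
};

int main() {
    graph_ctx_t ctx;
    ctx.build_moe_layer(); // prints "routing over 8 experts"
    return 0;
}

Dropping the local declarations, as this commit does, leaves the branch reading whatever the enclosing scope provides, which is the safer default whenever the two values can differ.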
