glm4moe
1 parent 22f060c commit c81de6e
src/llama-model.cpp
@@ -13800,10 +13800,6 @@ struct llm_build_glm4_moe : public llm_graph_context {
                     LLM_FFN_SILU, LLM_FFN_PAR, il);
                 cb(cur, "ffn_out", il);
             } else {
-                // MoE layer with shared experts
-                const int64_t n_expert = hparams.n_expert;
-                const int64_t n_expert_used = hparams.n_expert_used;
-
                 // Process routed experts using existing MoE infrastructure
                 ggml_tensor * routed_out = build_moe_ffn(cur,
                         model.layers[il].ffn_gate_inp,
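The removed lines appear to be unused local copies of hparams.n_expert and hparams.n_expert_used; the routed-expert path in this hunk relies on the existing build_moe_ffn helper. For readers who want the routing idea that such a helper encapsulates, below is a minimal, self-contained C++ sketch of top-k expert selection with normalized gating weights. It is illustrative only and does not use the ggml/llama.cpp API; the names RoutedExpert and select_top_k_experts are hypothetical.

// Illustrative sketch of MoE top-k routing; not the llama.cpp implementation.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <numeric>
#include <vector>

struct RoutedExpert {
    int64_t index;   // which expert to run
    float   weight;  // normalized gating weight
};

// Pick the n_expert_used highest-scoring experts from the router logits
// (one logit per expert, so router_logits.size() == n_expert), then
// softmax-normalize the selected scores so expert outputs can be combined
// as a weighted sum. Assumes 1 <= n_expert_used <= router_logits.size().
static std::vector<RoutedExpert> select_top_k_experts(
        const std::vector<float> & router_logits,
        int64_t n_expert_used) {
    std::vector<int64_t> order(router_logits.size());
    std::iota(order.begin(), order.end(), 0);
    std::partial_sort(order.begin(), order.begin() + n_expert_used, order.end(),
        [&](int64_t a, int64_t b) { return router_logits[a] > router_logits[b]; });

    std::vector<RoutedExpert> selected;
    const float max_logit = router_logits[order[0]];  // for numerical stability
    float sum = 0.0f;
    for (int64_t k = 0; k < n_expert_used; ++k) {
        const float w = std::exp(router_logits[order[k]] - max_logit);
        selected.push_back({order[k], w});
        sum += w;
    }
    for (auto & e : selected) {
        e.weight /= sum;  // weights of the selected experts sum to 1
    }
    return selected;
}

In a MoE FFN layer, each selected expert's FFN output would then be scaled by its weight and summed (optionally together with a shared-expert branch) to form the layer output.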