Commit 0034230

Yet another post-merge dupe.
1 parent 279301e commit 0034230
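
Context for the change (not part of the commit text itself): the removed block is a second copy of the llm_build_bailingmoe2 graph builder that a merge left behind in src/llama-model.cpp, immediately after the first copy. C++ does not allow the same class to be defined twice in one translation unit, so the file cannot build until one copy is deleted. A minimal sketch of the failure mode, with illustrative names rather than the actual llama.cpp code:

    // Sketch only: a struct defined once compiles fine.
    struct llm_build_example {
        int n_layer = 0;
    };

    // If a merge also left a second, identical definition in the same file,
    // the compiler would reject the translation unit, e.g. with
    // "error: redefinition of 'llm_build_example'":
    //
    //   struct llm_build_example {
    //       int n_layer = 0;
    //   };
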

File tree

1 file changed: +0 −144 lines changed


src/llama-model.cpp

Lines changed: 0 additions & 144 deletions
@@ -17404,150 +17404,6 @@ struct llm_build_bailingmoe2 : public llm_graph_context {
     }
 };
 
-struct llm_build_bailingmoe2 : public llm_graph_context {
-    llm_build_bailingmoe2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
-        const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
-
-        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
-
-        ggml_tensor * cur;
-        ggml_tensor * inpL;
-
-        inpL = build_inp_embd(model.tok_embd);
-
-        // inp_pos - contains the positions
-        ggml_tensor * inp_pos = build_inp_pos();
-
-        auto * inp_attn = build_attn_inp_kv();
-
-        ggml_tensor * inp_out_ids = build_inp_out_ids();
-
-        const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
-        for (int il = 0; il < n_transformer_layers; ++il) {
-            ggml_tensor * inpSA = inpL;
-
-            // norm
-            cur = build_norm(inpL,
-                    model.layers[il].attn_norm, NULL,
-                    LLM_NORM_RMS, il);
-            cb(cur, "attn_norm", il);
-
-            // self_attention
-            {
-                cur = build_lora_mm(model.layers[il].wqkv, cur);
-                cb(cur, "wqkv", il);
-
-                ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd));
-                ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd));
-                ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa));
-
-                Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
-                cb(Qcur, "Qcur_normed", il);
-
-                Qcur = ggml_rope_ext(
-                        ctx0, Qcur, inp_pos, nullptr,
-                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                        ext_factor, attn_factor, beta_fast, beta_slow
-                        );
-
-                Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
-                cb(Kcur, "Kcur_normed", il);
-
-                Kcur = ggml_rope_ext(
-                        ctx0, Kcur, inp_pos, nullptr,
-                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                        ext_factor, attn_factor, beta_fast, beta_slow
-                        );
-
-                cb(Qcur, "Qcur", il);
-                cb(Kcur, "Kcur", il);
-                cb(Vcur, "Vcur", il);
-
-                cur = build_attn(inp_attn,
-                        model.layers[il].wo, model.layers[il].bo,
-                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
-            }
-
-            if (il == n_transformer_layers - 1 && inp_out_ids) {
-                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
-                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-            }
-
-            ggml_tensor * sa_out = ggml_add(ctx0, cur, inpSA);
-            cb(sa_out, "sa_out", il);
-
-            // MoE branch
-            cur = build_norm(sa_out,
-                    model.layers[il].ffn_norm, NULL,
-                    LLM_NORM_RMS, il);
-            cb(cur, "ffn_norm", il);
-
-            if (static_cast<uint32_t>(il) < hparams.n_layer_dense_lead) {
-                cur = build_ffn(cur,
-                        model.layers[il].ffn_up,   NULL, NULL,
-                        model.layers[il].ffn_gate, NULL, NULL,
-                        model.layers[il].ffn_down, NULL, NULL,
-                        NULL,
-                        LLM_FFN_SILU, LLM_FFN_PAR, il);
-                cb(cur, "ffn_out", il);
-            } else {
-                ggml_tensor * moe_out =
-                    build_moe_ffn(cur,
-                            model.layers[il].ffn_gate_inp,
-                            model.layers[il].ffn_up_exps,
-                            model.layers[il].ffn_gate_exps,
-                            model.layers[il].ffn_down_exps,
-                            model.layers[il].ffn_exp_probs_b,
-                            n_expert, n_expert_used,
-                            LLM_FFN_SILU, hparams.expert_weights_norm,
-                            true, hparams.expert_weights_scale,
-                            (llama_expert_gating_func_type) hparams.expert_gating_func,
-                            il);
-                cb(moe_out, "ffn_moe_out", il);
-
-                {
-                    ggml_tensor * ffn_shexp = build_ffn(cur,
-                            model.layers[il].ffn_up_shexp,   NULL, NULL,
-                            model.layers[il].ffn_gate_shexp, NULL, NULL,
-                            model.layers[il].ffn_down_shexp, NULL, NULL,
-                            NULL,
-                            LLM_FFN_SILU, LLM_FFN_PAR, il);
-                    cb(ffn_shexp, "ffn_shexp", il);
-
-                    cur = ggml_add(ctx0, moe_out, ffn_shexp);
-                    cb(cur, "ffn_out", il);
-                }
-            }
-
-            cur = ggml_add(ctx0, cur, sa_out);
-
-            cur = build_cvec(cur, il);
-            cb(cur, "l_out", il);
-
-            // input for next layer
-            inpL = cur;
-        }
-
-        cur = inpL;
-
-        cur = build_norm(cur,
-                model.output_norm, NULL,
-                LLM_NORM_RMS, -1);
-
-        cb(cur, "result_norm", -1);
-        res->t_embd = cur;
-
-        // lm_head
-        cur = build_lora_mm(model.output, cur);
-
-        cb(cur, "result_output", -1);
-        res->t_logits = cur;
-
-        ggml_build_forward_expand(gf, cur);
-    }
-};
-
 struct llm_build_dots1 : public llm_graph_context {
     llm_build_dots1(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
         const int64_t n_embd_head = hparams.n_embd_head_v;
