Commit 5b4673b

llama : rename expert_weights_b to exp_probs_b
1 parent 140eb29 commit 5b4673b

File tree

1 file changed: +3 −3 lines


src/llama.cpp

Lines changed: 3 additions & 3 deletions
@@ -10264,7 +10264,7 @@ static struct ggml_tensor * llm_build_moe_ffn(
         struct ggml_tensor * up_exps,
         struct ggml_tensor * gate_exps,
         struct ggml_tensor * down_exps,
-        struct ggml_tensor * expert_weights_b,
+        struct ggml_tensor * exp_probs_b,
                    int64_t   n_expert,
                    int64_t   n_expert_used,
            llm_ffn_op_type   type_op,
@@ -10298,8 +10298,8 @@ llm_expert_gating_func_type gating_op,
     // add experts selection bias - introduced in DeepSeek V3
     // leave probs unbiased as it's later used to get expert weights
     ggml_tensor * selection_probs = probs;
-    if (expert_weights_b != nullptr) {
-        selection_probs = ggml_add(ctx, probs, expert_weights_b);
+    if (exp_probs_b != nullptr) {
+        selection_probs = ggml_add(ctx, probs, exp_probs_b);
         cb(selection_probs, "ffn_moe_probs_biased", il);
     }
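
For context, here is a standalone C++ sketch (not llama.cpp code) of the DeepSeek-V3-style expert-selection bias this diff touches: experts are chosen by the biased scores, but each selected expert is weighted by its unbiased probability, mirroring the "leave probs unbiased" comment above. Only the names probs, exp_probs_b, n_expert, and n_expert_used come from the diff; the values and the rest of the code are illustrative assumptions.

    // Hypothetical sketch of biased expert selection; not from llama.cpp.
    #include <algorithm>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main() {
        const int n_expert = 4, n_expert_used = 2;

        // router probabilities for one token, and a learned per-expert bias
        std::vector<float> probs       = { 0.40f, 0.30f, 0.20f, 0.10f};
        std::vector<float> exp_probs_b = {-0.50f, 0.00f, 0.25f, 0.00f};

        // selection_probs = probs + exp_probs_b (the ggml_add in the diff)
        std::vector<float> selection_probs(n_expert);
        for (int i = 0; i < n_expert; ++i) {
            selection_probs[i] = probs[i] + exp_probs_b[i];
        }

        // pick the top-k experts by the *biased* scores
        std::vector<int> idx(n_expert);
        std::iota(idx.begin(), idx.end(), 0);
        std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
            [&](int a, int b) { return selection_probs[a] > selection_probs[b]; });

        // ...but weight each selected expert by its *unbiased* probability
        for (int k = 0; k < n_expert_used; ++k) {
            printf("expert %d selected, weight %.2f\n", idx[k], probs[idx[k]]);
        }
        return 0;
    }

With these example values the bias flips the selection: experts 2 and 1 are picked instead of the two highest-probability experts 0 and 1, yet their routing weights (0.20 and 0.30) still come from the unbiased probs. That separation is why the rename matters: the tensor biases the probabilities used for selection, not the expert weights, so exp_probs_b describes it more accurately than expert_weights_b.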
