Commit 8d0d01a

ikawrakow and Iwan Kawrakow authored
gpt-oss: duplicate experts biases when necessary (ikawrakow#829)
Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent 41bdd86 commit 8d0d01a

Showing 4 changed files with 47 additions and 18 deletions.

ggml/src/ggml-backend.cpp

Lines changed: 11 additions & 7 deletions
@@ -1862,7 +1862,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
     std::vector<uint32_t> unique_ids;
     ggml_tensor * last_ids_tensor = nullptr;

-
     for (int i = 0; i < sched->n_splits; i++) {
 #if IK_PRINT_TIMING
         int64_t tim1 = ggml_time_us();
@@ -1872,7 +1871,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
         ggml_backend_t split_backend = sched->backends[split_backend_id];
         ggml_backend_t last_input_backend = nullptr;

-        int cur_arg = 0;

         // copy the input tensors to the split backend
         for (int j = 0; j < split->n_inputs; j++) {
@@ -1900,7 +1898,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
             if (sched->only_active_experts && split->graph.n_nodes > 0 &&
                 ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
                 ggml_backend_buffer_is_host(input->buffer) &&
-                node->src[cur_arg] == input_cpy &&
                 (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_MOE_FUSED_UP_GATE)) {

                 if (input_backend != last_input_backend) {
@@ -1922,7 +1919,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                     }
                 }

-                int n_expert = input->ne[2];
+                int n_expert = node->src[0]->ne[2];

                 if (ids_tensor != last_ids_tensor) {
                     ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
@@ -1943,12 +1940,15 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                     last_ids_tensor = ids_tensor;
                 }

+                const size_t expert_size = input->ne[2] > 1 ? input->nb[2] : input->nb[1];
+
+                if (input->ne[2] > 1) {
+
                 auto copy_experts = [&](int32_t first_id, int32_t last_id) {
-                    const size_t expert_size = (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_MOE_FUSED_UP_GATE) ? input->nb[2] : input->nb[1];
                     const size_t expert_offset = first_id * expert_size;
                     const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
                     const size_t padding = 512;
-                    const size_t padding_end = last_id < input->ne[2] - 1 ? std::min<size_t>(expert_size, padding) : 0;
+                    const size_t padding_end = last_id < n_expert - 1 ? std::min<size_t>(expert_size, padding) : 0;

                     ggml_backend_tensor_set_async(split_backend,
                         input_cpy,
@@ -1974,7 +1974,11 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                     first_id = next_on_id(last_id);
                 }

-                if (node->op == GGML_OP_MOE_FUSED_UP_GATE) ++cur_arg;
+                } else {
+                    auto copy_size = ggml_nbytes(input);
+                    ggml_backend_tensor_set_async(split_backend, input_cpy, input->data, 0, copy_size);
+                }
+
             } else
                 // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                 // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
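
The scheduler change above replaces the op-based heuristic (and the cur_arg bookkeeping that went with it) with a shape-based one: the number of experts is read from node->src[0]->ne[2], and an input is copied expert-by-expert only when it actually has an expert dimension (ne[2] > 1); otherwise, as for the flat gpt-oss expert bias tensors, the whole tensor is copied in one async call. The sketch below is illustrative only and not repository code: the tensor struct and copy_async callback stand in for ggml_tensor and ggml_backend_tensor_set_async, and because the hunk truncates the arguments of the real async copy, padding_end here simply extends the copy size.

// Illustrative sketch of the new decision: 3D expert tensors (ne[2] > 1) are
// copied slice-by-slice for the active experts, flat tensors (e.g. the 2D
// gpt-oss expert biases) are copied whole.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

struct tensor { int64_t ne[4]; size_t nb[4]; size_t nbytes; const void * data; };

// (dst offset, src pointer, size in bytes); stand-in for ggml_backend_tensor_set_async
using copy_async_fn = std::function<void(size_t, const void *, size_t)>;

static void copy_moe_input(const tensor & input, int n_expert,
                           const std::vector<std::pair<int32_t, int32_t>> & active_ranges,
                           const copy_async_fn & copy_async) {
    // stride of one expert: nb[2] if the tensor has an expert dimension, else nb[1]
    const size_t expert_size = input.ne[2] > 1 ? input.nb[2] : input.nb[1];
    if (input.ne[2] > 1) {
        for (auto [first_id, last_id] : active_ranges) {
            const size_t expert_offset    = first_id * expert_size;
            const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
            // up to 512 bytes of trailing padding, except after the last expert
            const size_t padding_end = last_id < n_expert - 1 ? std::min<size_t>(expert_size, 512) : 0;
            copy_async(expert_offset, (const char *)input.data + expert_offset,
                       expert_size_copy + padding_end);
        }
    } else {
        // no expert dimension: copy the whole tensor in one go
        copy_async(0, input.data, input.nbytes);
    }
}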

src/llama-build-context.cpp

Lines changed: 7 additions & 3 deletions
@@ -8151,12 +8151,16 @@ ggml_cgraph * llm_build_context::build_openai_moe() {
         cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_post_norm, nullptr, LLM_NORM_RMS, cb, il);
         cb(cur, "attn_post_norm", il);

+        bool use_dup_bias = cur->ne[1] < 32 && model.layers[il].ffn_up_exps_b_dup &&
+                            model.layers[il].ffn_gate_exps_b_dup &&
+                            model.layers[il].ffn_down_exps_b_dup;
+
         // MoE branch
         cur = llm_build_moe_ffn(ctx0, lctx, cur,
                 model.layers[il].ffn_gate_inp, model.layers[il].ffn_gate_inp_b,
-                model.layers[il].ffn_up_exps, model.layers[il].ffn_up_exps_b,
-                model.layers[il].ffn_gate_exps, model.layers[il].ffn_gate_exps_b,
-                model.layers[il].ffn_down_exps, model.layers[il].ffn_down_exps_b,
+                model.layers[il].ffn_up_exps, use_dup_bias ? model.layers[il].ffn_up_exps_b_dup : model.layers[il].ffn_up_exps_b,
+                model.layers[il].ffn_gate_exps, use_dup_bias ? model.layers[il].ffn_gate_exps_b_dup : model.layers[il].ffn_gate_exps_b,
+                model.layers[il].ffn_down_exps, use_dup_bias ? model.layers[il].ffn_down_exps_b_dup : model.layers[il].ffn_down_exps_b,
                 nullptr,
                 n_expert, n_expert_used,
                 LLM_FFN_SWIGLU_OAI_MOE, false,
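
In the graph builder the change is a straightforward selection: for batches with fewer than 32 rows (the regime where the only-active-experts copy path applies) and when all three duplicated biases were created at load time, build_openai_moe() passes the duplicates to llm_build_moe_ffn(), so the biases used live in the same context as the corresponding expert weights; otherwise it keeps the original bias tensors. A minimal sketch of that selection follows; the exps_biases struct and select_exps_biases() are stand-ins for the relevant llama_layer members, not repository code.

struct ggml_tensor; // opaque; declared in ggml.h

struct exps_biases {
    ggml_tensor * up_b   = nullptr; ggml_tensor * up_b_dup   = nullptr;
    ggml_tensor * gate_b = nullptr; ggml_tensor * gate_b_dup = nullptr;
    ggml_tensor * down_b = nullptr; ggml_tensor * down_b_dup = nullptr;
};

// Mirrors the use_dup_bias condition in build_openai_moe(): prefer the
// duplicated biases only for small batches and only if all three exist.
static void select_exps_biases(const exps_biases & l, long long n_rows,
                               ggml_tensor ** up_b, ggml_tensor ** gate_b, ggml_tensor ** down_b) {
    const bool use_dup = n_rows < 32 && l.up_b_dup && l.gate_b_dup && l.down_b_dup;
    *up_b   = use_dup ? l.up_b_dup   : l.up_b;
    *gate_b = use_dup ? l.gate_b_dup : l.gate_b;
    *down_b = use_dup ? l.down_b_dup : l.down_b;
}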

src/llama-load-tensors.cpp

Lines changed: 26 additions & 8 deletions
@@ -127,7 +127,8 @@ struct create_tensors_helper : public create_tensors_helper_interface {
     llama_model_loader & ml;
     llama_model & model;

-    ggml_tensor * create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0);
+    ggml_tensor * create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0,
+            ggml_context ** actual_ctx = nullptr);

     void create_default_embd_output(const LLM_TN & tn, int n_embd, int n_vocab, bool norm_bias);
     void create_embd_output(const LLM_TN & tn, int n_embd, int n_vocab, bool has_norm = true);
@@ -198,7 +199,8 @@ create_tensors_helper::create_tensors_helper(llama_model_loader & _ml, llama_mod
     }
 }

-ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags) {
+ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne,
+        int flags, ggml_context ** actual_context) {
     if (ml.tensor_buft_overrides) {
         for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
             std::regex pattern(overrides->pattern);
@@ -209,6 +211,7 @@ ggml_tensor * create_tensors_helper::create_tensor(ggml_context * ctx, const std
             }
         }
     }
+    if (actual_context) *actual_context = ctx;
     return ml.create_tensor(ctx, name, ne, flags);
 }

@@ -2311,21 +2314,36 @@ bool create_tensors_helper::create_openai_moe_tensors(const LLM_TN & tn) {

         layer.attn_sinks = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, 0);

+        ggml_context *ctx_ffn_gate, *ctx_ffn_up, *ctx_ffn_down;
         layer.ffn_gate_inp = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert}, 0);
-        layer.ffn_gate_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
-        layer.ffn_down_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
-        layer.ffn_up_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+        layer.ffn_gate_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0, &ctx_ffn_gate);
+        layer.ffn_down_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0, &ctx_ffn_down);
+        layer.ffn_up_exps = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0, &ctx_ffn_up);

         // bias
         layer.bq = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_head * n_rot}, 0);
         layer.bk = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_head_kv * n_rot}, 0);
         layer.bv = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_head_kv * n_rot}, 0);
         layer.bo = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);

+        ggml_context *ctx_ffn_gate_b, *ctx_ffn_up_b, *ctx_ffn_down_b;
         layer.ffn_gate_inp_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "bias", i), {n_expert}, 0);
-        layer.ffn_gate_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
-        layer.ffn_down_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert}, 0);
-        layer.ffn_up_exps_b = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
+        layer.ffn_gate_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0, &ctx_ffn_gate_b);
+        layer.ffn_down_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert}, 0, &ctx_ffn_down_b);
+        layer.ffn_up_exps_b = create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert}, 0, &ctx_ffn_up_b);
+
+        if (ctx_ffn_gate_b != ctx_ffn_gate) {
+            layer.ffn_gate_exps_b_dup = create_tensor(ctx_ffn_gate, tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
+        if (ctx_ffn_up_b != ctx_ffn_up) {
+            layer.ffn_up_exps_b_dup = create_tensor(ctx_ffn_up, tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
+        if (ctx_ffn_down_b != ctx_ffn_down) {
+            layer.ffn_down_exps_b_dup = create_tensor(ctx_ffn_down, tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert},
+                    llama_model_loader::TENSOR_DUPLICATED);
+        }
     }
     return use_mmap_buffer;
 }
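
The loader change has two parts. First, create_tensor() gains an optional out-parameter that reports the ggml_context the tensor was actually placed in, which can differ from the requested one when a tensor buffer-type override matches. Second, create_openai_moe_tensors() compares the context of each expert bias with the context of the corresponding expert weight and, when they differ, creates a TENSOR_DUPLICATED copy of the bias in the weight's context; these copies are the ffn_*_exps_b_dup tensors consumed in build_openai_moe() above. A condensed sketch of the pattern; create_tensor_fn and maybe_dup_bias are illustrative names, not functions from the repository:

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

struct ggml_context; // opaque; declared in ggml.h
struct ggml_tensor;

// Mirrors the extended create_tensor(): the last argument, when non-null,
// receives the context the tensor was actually created in.
using create_tensor_fn = std::function<ggml_tensor * (ggml_context *, const std::string &,
                                                      const std::vector<int64_t> &, int,
                                                      ggml_context **)>;

// If the bias landed in a different context than its expert weight, create a
// duplicated bias in the weight's context (dup_flag stands in for
// llama_model_loader::TENSOR_DUPLICATED); otherwise no duplicate is needed.
static ggml_tensor * maybe_dup_bias(const create_tensor_fn & create_tensor,
                                    ggml_context * ctx_bias, ggml_context * ctx_weight,
                                    const std::string & name, const std::vector<int64_t> & ne,
                                    int dup_flag) {
    if (ctx_bias == ctx_weight) {
        return nullptr;
    }
    return create_tensor(ctx_weight, name, ne, dup_flag, nullptr);
}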

src/llama-model.h

Lines changed: 3 additions & 0 deletions
@@ -178,6 +178,9 @@ struct llama_layer {
     struct ggml_tensor * ffn_gate_exps_b = nullptr;
     struct ggml_tensor * ffn_down_exps_b = nullptr;
     struct ggml_tensor * ffn_up_exps_b = nullptr;
+    struct ggml_tensor * ffn_gate_exps_b_dup = nullptr;
+    struct ggml_tensor * ffn_down_exps_b_dup = nullptr;
+    struct ggml_tensor * ffn_up_exps_b_dup = nullptr;

     // ff shared expert (shexp)
     struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
