
Commit 90cd13d

tamarPal authored and committed
fix: remove trailing whitespace
1 parent 053234f · commit 90cd13d

File tree

2 files changed: +4, -3 lines changed


src/llama-context.cpp

Lines changed: 3 additions & 2 deletions
@@ -1383,7 +1383,7 @@ void llama_context::output_reorder() {
 
 uint32_t llama_context::graph_max_nodes() const {
     uint32_t base_nodes = std::max<uint32_t>(1024u, 8u*model.n_tensors());
-
+
     // Megrez-MoE creates many intermediate tensors in build_mergez_moe_ffn for each layer:
     // - sigmoid, add (bias), reshape (3x), get_rows, sum_rows, div, view_2d, mul_mat (per expert)
     // - ggml_top_k internally calls ggml_argsort + ggml_view_4d (2 more tensors per layer)
@@ -1395,7 +1395,8 @@ uint32_t llama_context::graph_max_nodes() const {
         // Double it to 4096 for safety margin during warmup's triple graph construction
         base_nodes += 4096;
     }
-
+
+
     return base_nodes;
 }
 
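For context on the hunk above: its surrounding lines spell out how the graph node budget is derived. The following is a minimal standalone sketch of that logic, assuming only what the diff context shows; it is not the verbatim upstream function, and n_tensors and is_megrez_moe are hypothetical stand-ins for model.n_tensors() and the architecture check.

#include <algorithm>
#include <cstdint>

// Standalone paraphrase of the budget logic visible in the hunk context.
// n_tensors and is_megrez_moe are hypothetical stand-ins for
// model.n_tensors() and the Megrez-MoE architecture check.
static uint32_t graph_max_nodes_sketch(uint32_t n_tensors, bool is_megrez_moe) {
    // Base budget: at least 1024 graph nodes, or 8 per model tensor.
    uint32_t base_nodes = std::max<uint32_t>(1024u, 8u * n_tensors);

    if (is_megrez_moe) {
        // Megrez-MoE builds many intermediate tensors per layer (sigmoid, bias
        // add, reshapes, get_rows, sum_rows, div, view_2d, per-expert mul_mat,
        // plus the argsort/view pair inside ggml_top_k), so add a 4096-node
        // safety margin for warmup's triple graph construction.
        base_nodes += 4096;
    }
    return base_nodes;
}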

src/models/megrez-moe.cpp

Lines changed: 1 addition & 1 deletion
@@ -168,7 +168,7 @@ llm_build_megrez_moe::llm_build_megrez_moe(const llama_model & model, const llm_
         // Compute gate logits from pre_gate_hidden instead of cur
         ggml_tensor * gate_logits = build_lora_mm(model.layers[il].ffn_gate_inp, pre_gate_hidden);
         cb(gate_logits, "ffn_moe_logits", il);
-
+
         // Use standard build_moe_ffn but with pre-computed gate logits
         ggml_tensor * moe_out = build_moe_ffn(cur,
                 model.layers[il].ffn_gate_inp,
