Commit 0dda56d

qwen3 : fix typo

1 parent 5c3a0d7

File tree

2 files changed: +3, -3 lines

src/models/models.h

Lines changed: 1 addition & 1 deletion

@@ -431,7 +431,7 @@ struct llm_build_qwen3next : public llm_graph_context_mamba {
             ggml_tensor * inp_pos,
             int il);
 
-    ggml_tensor * buil_layer_attn_linear(
+    ggml_tensor * build_layer_attn_linear(
             llm_graph_input_rs * inp,
             ggml_tensor * cur,
             ggml_tensor * causal_mask,

src/models/qwen3next.cpp

Lines changed: 2 additions & 2 deletions

@@ -34,7 +34,7 @@ llm_build_qwen3next::llm_build_qwen3next(const llama_model & model, const llm_gr
         // Determine layer type and build appropriate attention mechanism
         if (hparams.is_recurrent(il)) {
             // Linear attention layer (gated delta net)
-            cur = buil_layer_attn_linear(inp->get_recr(), cur, causal_mask, identity, il);
+            cur = build_layer_attn_linear(inp->get_recr(), cur, causal_mask, identity, il);
         } else {
             // Full attention layer
             cur = build_layer_attn(inp->get_attn(), cur, inp_pos, il);
@@ -707,7 +707,7 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
     return cur;
 }
 
-ggml_tensor * llm_build_qwen3next::buil_layer_attn_linear(
+ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
         llm_graph_input_rs * inp,
         ggml_tensor * cur,
         ggml_tensor * causal_mask,
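
For context, here is a minimal standalone sketch of the per-layer dispatch this commit touches. It uses hypothetical, simplified stand-ins (hparams_t and string-returning builders), not the real llama.cpp types: hybrid models such as Qwen3-Next pick either a linear-attention (gated delta net) layer or a full-attention layer per layer index, which is why the renamed build_layer_attn_linear sits alongside build_layer_attn.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for llama.cpp's hparams: tracks which layers are recurrent.
struct hparams_t {
    std::vector<bool> recurrent;  // true -> linear attention (gated delta net)
    bool is_recurrent(int il) const { return recurrent[il]; }
};

// Simplified stand-ins for the two builder methods; the real ones build ggml graphs.
static const char * build_layer_attn_linear(int /*il*/) { return "linear (gated delta net)"; }
static const char * build_layer_attn(int /*il*/)        { return "full"; }

int main() {
    // Example layout: three linear-attention layers followed by one full-attention layer.
    hparams_t hparams{{true, true, true, false}};
    for (int il = 0; il < (int) hparams.recurrent.size(); ++il) {
        const char * kind = hparams.is_recurrent(il)
            ? build_layer_attn_linear(il)  // the function renamed by this commit
            : build_layer_attn(il);
        std::printf("layer %d: %s attention\n", il, kind);
    }
    return 0;
}

Note that the misspelled name was consistent across the declaration (models.h), the definition, and the call site (qwen3next.cpp) before this commit, so the old code compiled fine; the rename is purely a readability fix.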
