
Commit 5f007c6

Merge pull request #31 from Thireus/tr/qwen3-vl-clean
Bugfix n_embd was declared multiple times
2 parents 274901e + 59ceaf8

File tree

1 file changed (+4, −2 lines)

src/llama-load-tensors.cpp

Lines changed: 4 additions & 2 deletions
@@ -1000,10 +1000,11 @@ bool create_tensors_helper::create_qwen2_moe_tensors(const LLM_TN & tn) {
 
 bool create_tensors_helper::create_qwen3_tensors(const LLM_TN & tn) {
     LOADING_PRELUDE
+
     // for model loading, the weights only have the main embd
     // so we need to divide by the number of deepstack layers + 1
     // n_embd is const int so we declare a new variable
-    int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
+    n_embd = n_embd / (hparams.n_deepstack_layers + 1);
     model.tok_embd = create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
     // output
@@ -1039,10 +1040,11 @@ bool create_tensors_helper::create_qwen3_tensors(const LLM_TN & tn) {
 
 bool create_tensors_helper::create_qwen3_moe_tensors(const LLM_TN & tn) {
     LOADING_PRELUDE
+
     // for model loading, the weights only have the main embd
     // so we need to divide by the number of deepstack layers + 1
     // n_embd is const int so we declare a new variable
-    int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
+    n_embd = n_embd / (hparams.n_deepstack_layers + 1);
     model.tok_embd = create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
     // output
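
For context, here is a minimal, self-contained sketch of the bug pattern this commit fixes. Everything below is an assumption for illustration: the real LOADING_PRELUDE macro and hparams definitions live elsewhere in the repository and are not shown in this diff. The diff implies that LOADING_PRELUDE already declares a local n_embd, so the old `int64_t n_embd = ...` line was a second declaration of the same name in the same scope, which C++ rejects as a redefinition; the fix reassigns the existing variable instead.

    #include <cstdint>

    // Hypothetical stand-in for the model hyperparameters (illustration only).
    struct HParams { int64_t n_embd = 4096; int64_t n_deepstack_layers = 3; };

    // Assumption: the prelude macro already declares a mutable local n_embd.
    #define LOADING_PRELUDE int64_t n_embd = hparams.n_embd;

    int main() {
        HParams hparams;
        LOADING_PRELUDE

        // Before the fix: a second declaration of n_embd in the same scope,
        // which fails to compile ("redefinition of 'n_embd'"):
        //int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);

        // After the fix: reassign the variable the prelude already declared.
        n_embd = n_embd / (hparams.n_deepstack_layers + 1);

        return n_embd == 1024 ? 0 : 1; // 4096 / (3 + 1)
    }

Note that the surviving in-file comment still says a new variable is declared; after this fix the helpers reuse the prelude's variable rather than shadow-declaring their own.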
