
Commit 9a49ae9

llama : gemma3 : use output tensor if it exists in model weight
1 parent 4375415

File tree: 2 files changed, 7 additions, 1 deletion

gguf-py/gguf/constants.py

Lines changed: 1 addition & 0 deletions
@@ -1113,6 +1113,7 @@ class MODEL_TENSOR(IntEnum):
     ],
     MODEL_ARCH.GEMMA3: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,

src/llama-model.cpp

Lines changed: 6 additions & 1 deletion
@@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }

                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
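
Note on the hunk above: it makes the Gemma 3 output (lm_head) tensor optional. A dedicated output.weight is loaded when the GGUF file provides one; otherwise the loader falls back to duplicating the token-embedding weight, as before. Below is a minimal, self-contained sketch of that fallback pattern, assuming nothing beyond standard C++: the toy_tensor struct, the find_tensor helper, and the in-memory weight map are hypothetical stand-ins for illustration, not llama.cpp's actual loader API. Only the tensor names output.weight and token_embd.weight follow the GGUF naming seen in the diff.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for a model tensor; not a llama.cpp type.
struct toy_tensor {
    std::string name;
    std::vector<float> data;
};

// Look up a tensor by name in the "model file"; return nullptr if absent.
static const toy_tensor * find_tensor(const std::map<std::string, toy_tensor> & weights,
                                      const std::string & name) {
    auto it = weights.find(name);
    return it == weights.end() ? nullptr : &it->second;
}

int main() {
    // Pretend model file contents: only token embeddings, no dedicated output head.
    std::map<std::string, toy_tensor> weights = {
        { "token_embd.weight", { "token_embd.weight", { 0.1f, 0.2f, 0.3f } } },
    };

    // Prefer a dedicated output (lm_head) tensor; treat it as optional.
    const toy_tensor * output = find_tensor(weights, "output.weight");

    // Fall back to the token-embedding weight when it is missing (tied weights).
    if (output == nullptr) {
        output = find_tensor(weights, "token_embd.weight");
        std::printf("no output.weight found, reusing %s\n", output->name.c_str());
    } else {
        std::printf("using dedicated %s\n", output->name.c_str());
    }
    return 0;
}

With a file that does contain output.weight, the first lookup succeeds and the fallback branch is never taken, which mirrors the behavior the commit adds for Gemma 3 models that ship a separate output tensor.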
