
Commit 0ed1f38

Merge branch 'ikawrakow:main' into main
2 parents 42d8c6a + 6051ba2

File tree

1 file changed (+3, -3)


src/llama.cpp

Lines changed: 3 additions & 3 deletions
@@ -6741,7 +6741,7 @@ static bool llm_load_tensors(
                 layer.nextn.embed_tokens = create_tensor(ctx_for_layer(final_layer),
                         tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", final_layer),
                         { n_embd, n_vocab },
-                        flags);
+                        flags | TENSOR_NOT_REQUIRED);
                 // ENORM, HNORM: [embd]
                 layer.nextn.enorm = create_tensor(ctx_for_layer(final_layer),
                         tn(LLM_TENSOR_NEXTN_ENORM, "weight", final_layer),
@@ -6755,12 +6755,12 @@ static bool llm_load_tensors(
                 layer.nextn.shared_head_head = create_tensor(ctx_for_layer(final_layer),
                         tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", final_layer),
                         { n_embd, n_vocab },
-                        flags);
+                        flags | TENSOR_NOT_REQUIRED);
                 // SHARED_HEAD_NORM: [embd]
                 layer.nextn.shared_head_norm = create_tensor(ctx_for_layer(final_layer),
                         tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", final_layer),
                         { n_embd },
-                        flags);
+                        flags | TENSOR_NOT_REQUIRED);
             }
         }
     }
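The net effect of the patch: the three NextN (next-token-prediction) tensors for the final layer become optional at load time. In llama.cpp, passing TENSOR_NOT_REQUIRED to create_tensor makes a missing tensor return nullptr instead of failing the load, so GGUF files that omit these tensors still load. A minimal sketch of that optional-tensor pattern, assuming hypothetical TensorMap and load_tensor stand-ins for the real loader (not the actual llama.cpp implementation):

// Sketch of the optional-tensor pattern the diff relies on: with
// TENSOR_NOT_REQUIRED set, a missing tensor yields nullptr rather than
// aborting the load. TensorMap and load_tensor are hypothetical stand-ins.
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

enum TensorFlags : int {
    TENSOR_REQUIRED     = 0,
    TENSOR_NOT_REQUIRED = 1 << 0,  // mirrors the flag used in the diff
};

struct Tensor { std::string name; };
using TensorMap = std::map<std::string, Tensor>;  // stands in for GGUF contents

// Hypothetical analogue of create_tensor(): look a tensor up by name and
// either tolerate or reject its absence, depending on the flags.
static Tensor * load_tensor(TensorMap & model, const std::string & name, int flags) {
    auto it = model.find(name);
    if (it == model.end()) {
        if (flags & TENSOR_NOT_REQUIRED) {
            return nullptr;  // optional tensor: caller must null-check
        }
        throw std::runtime_error("missing required tensor: " + name);
    }
    return &it->second;
}

int main() {
    TensorMap model;  // deliberately empty: simulates a GGUF without NextN tensors

    // Before the patch, plain `flags` would make this throw for such a model;
    // after the patch, the tensor is simply absent and loading continues.
    Tensor * embed = load_tensor(model, "blk.46.nextn.embed_tokens.weight",
                                 TENSOR_REQUIRED | TENSOR_NOT_REQUIRED);
    std::printf("nextn.embed_tokens: %s\n", embed ? "loaded" : "absent (optional)");
    return 0;
}

The flip side of making the tensors optional is a null-check obligation: any code that consumes layer.nextn.embed_tokens, shared_head_head, or shared_head_norm after loading presumably has to tolerate null pointers, e.g. by skipping the NextN head when the tensors are absent.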
