Skip to content

Commit 52d1ef3

Browse files
Merge branch 'add-fh1-rebased' of https://github.com/tiiuae/llama.cpp-public into add-fh1-rebased
2 parents 9a048d8 + 097df0e commit 52d1ef3

File tree

2 files changed

+2
-4
lines changed

2 files changed

+2
-4
lines changed

src/llama-model.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4530,7 +4530,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
4530  4530            // output
4531  4531            {
4532  4532                output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hidden_size, n_vocab}, TENSOR_NOT_REQUIRED);
4533        -             final_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
      4533  +             output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
4534  4534            }
4535  4535

4536  4536            for (int i = 0; i < n_layer; ++i) {
@@ -14740,7 +14740,7 @@ struct llm_build_falcon_h1 : public llm_graph_context {
14740  14740        cur = inpL;
14741  14741

14742  14742        cur = build_norm(cur,
14743         -             model.final_norm, NULL,
       14743  +             model.output_norm, NULL,
14744  14744            LLM_NORM_RMS, -1);
14745  14745

14746  14746        cb(cur, "result_norm", -1);

src/llama-model.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,6 @@ struct llama_layer {
173  173        struct ggml_tensor * attn_norm_cross = nullptr;
174  174        struct ggml_tensor * attn_norm_enc = nullptr;
175  175        struct ggml_tensor * ssm_norm = nullptr;
176       -    struct ggml_tensor * final_norm = nullptr;
177  176

178  177        // attention
179  178        struct ggml_tensor * wq = nullptr;
@@ -365,7 +364,6 @@ struct llama_model {
365  364        struct ggml_tensor * output = nullptr;
366  365        struct ggml_tensor * output_b = nullptr;
367  366        struct ggml_tensor * output_norm_enc = nullptr;
368       -    struct ggml_tensor * final_norm = nullptr;
369  367

370  368        // classifier
371  369        struct ggml_tensor * cls = nullptr;

0 commit comments

Comments (0)