
Commit 94ab3a8

falcon-h1: fix whitespace issues
1 parent 6dde986 commit 94ab3a8

File tree

1 file changed: +2 additions, -1 deletion

src/llama-model.cpp

Lines changed: 2 additions & 1 deletion
@@ -2084,6 +2084,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     const int64_t q_lora_rank = hparams.n_lora_q;
     const int64_t kv_lora_rank = hparams.n_lora_kv;
     tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
     // output
     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
     output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
@@ -4550,7 +4551,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

     // embeddings
     tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, 0);
-
+
     // output
     output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hidden_size, n_vocab}, TENSOR_NOT_REQUIRED);
     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
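For context, the output projection in both hunks is created with TENSOR_NOT_REQUIRED, so a model with tied word embeddings may ship without a dedicated output tensor. Below is a minimal sketch of the fallback pattern used elsewhere in llama-model.cpp; the null check and the TENSOR_DUPLICATED flag are assumptions drawn from the surrounding file, not part of this commit:

    // Sketch: the output tensor is optional; when it is absent from the GGUF,
    // reuse the token embedding as the output projection (tied embeddings).
    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
    if (output == NULL) {
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
    }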

0 commit comments
