
Commit 6943f4e

falcon-h1: remove unused ssm_in_b and bad merge
1 parent 521e823 commit 6943f4e

File tree

2 files changed: +1 −4 lines changed

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion

@@ -4560,7 +4560,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             /*SSM LAYERS*/
             // ssm in
             layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {hidden_size, ssm_projection_size}, 0);
-            layer.ssm_in_b = create_tensor(tn(LLM_TENSOR_SSM_IN, "bias", i), {n_embd, ssm_projection_size}, TENSOR_NOT_REQUIRED);
             // ssm 1d conv
             layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {ssm_conv_kernel_size, ssm_conv_dim}, 0);
             layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {ssm_conv_dim}, TENSOR_NOT_REQUIRED);

@@ -4613,6 +4612,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

             for (int i = 0; i < n_layer; ++i) {
                 auto & layer = layers[i];
+
                 layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                 layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);

src/llama-model.h

Lines changed: 0 additions & 3 deletions

@@ -225,9 +225,6 @@ struct llama_layer {
     struct ggml_tensor * ffn_down_enc = nullptr;
     struct ggml_tensor * ffn_up_enc = nullptr;

-    // falcon_h1
-    struct ggml_tensor * ssm_in_b = nullptr;
-
     // ff MoE
     struct ggml_tensor * ffn_gate_inp = nullptr;
     struct ggml_tensor * ffn_gate_exps = nullptr;
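Together, the two hunks drop ssm_in_b from both the llama_layer struct and the tensor loader. Purely as a reference for the post-commit state, here is a minimal sketch of how the Falcon-H1 SSM tensor-loading lines read after this change, reconstructed from the unchanged context lines of the first hunk (all identifiers are taken from the diff; the surrounding function body is omitted):

    // src/llama-model.cpp, Falcon-H1 SSM layer tensors after this commit:
    // only the input-projection weight is created; ssm_in_b no longer exists.
    layer.ssm_in       = create_tensor(tn(LLM_TENSOR_SSM_IN,     "weight", i), {hidden_size, ssm_projection_size}, 0);
    // the 1d conv keeps its optional bias via TENSOR_NOT_REQUIRED
    layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {ssm_conv_kernel_size, ssm_conv_dim}, 0);
    layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias",   i), {ssm_conv_dim}, TENSOR_NOT_REQUIRED);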
