Skip to content

Commit bbca33e

Browse files
committed
fix unused args
1 parent ecc5253 commit bbca33e

File tree

3 files changed

+0
-7
lines changed

3 files changed

+0
-7
lines changed

convert_hf_to_gguf.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6647,9 +6647,6 @@ def set_gguf_parameters(self):
 6647 6647          self.gguf_writer.add_context_length(self.hparams.get("max_position_embeddings", 0))
 6648 6648          self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
 6649 6649
 6650      -        ## Mamba mixer params ##
 6651      -        self.gguf_writer.add_ssm_head_dim(self.d_head)
 6652      -
 6653 6650          ## Attention params ##
 6654 6651          self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) # Override value 0 from Mamba2
 6655 6652          self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])

gguf-py/gguf/gguf_writer.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -867,9 +867,6 @@ def add_ssm_group_count(self, value: int) -> None:
 867 867      def add_ssm_dt_b_c_rms(self, value: bool) -> None:
 868 868          self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value)
 869 869
 870     -    def add_ssm_head_dim(self, value: int) -> None:
 871     -        self.add_uint32(Keys.SSM.HEAD_DIM.format(arch=self.arch), value)
 872     -
 873 870      def add_tokenizer_model(self, model: str) -> None:
 874 871          self.add_string(Keys.Tokenizer.MODEL, model)
 875 872

src/llama-hparams.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,6 @@ struct llama_hparams {
 115 115      uint32_t ssm_d_state = 0;
 116 116      uint32_t ssm_dt_rank = 0;
 117 117      uint32_t ssm_n_group = 0;
 118     -    uint32_t ssm_head_dim = 0;
 119 118
 120 119      // for hybrid state space models
 121 120      std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;

0 commit comments

Comments
 (0)