
Commit 5251fe6

Add GGUF for Mamba (#34200)

* add mamba architecture for gguf
* add logic for weights conversion, some fixes and refactoring
* add lm_head layers, unit test refactoring
* more fixes for tests
* remove lm_head creation
* remove unused comments

1 parent eab6c49, commit 5251fe6

File tree: 4 files changed, +93 -2 lines changed

- docs/source/en/gguf.md
- src/transformers/integrations/ggml.py
- src/transformers/modeling_gguf_pytorch_utils.py
- tests/quantization/ggml/test_ggml.py

docs/source/en/gguf.md
Lines changed: 1 addition & 0 deletions

@@ -86,6 +86,7 @@ For now the supported model architectures are the architectures that have been v
 - GPT2
 - Starcoder2
 - T5
+- Mamba
 
 ## Example usage
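With Mamba added to the supported-architecture list, the "## Example usage" section of gguf.md applies to it as well. A minimal sketch of loading a Mamba GGUF checkpoint, using the repo and filenames introduced by the new tests in this commit (any other Mamba GGUF file works the same way):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "jpodivin/mamba-2.8b-hf-GGUF"   # GGUF repo used by the new tests
    gguf_file = "ggml-model-Q6_K.gguf"         # Q6_K quantized Mamba checkpoint

    # The GGUF tensors are dequantized and loaded into a regular Mamba model.
    tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
    model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file, torch_dtype=torch.float16)

    inputs = tokenizer("Hello", return_tensors="pt")["input_ids"]
    out = model.generate(inputs, max_new_tokens=10)
    print(tokenizer.decode(out[0], skip_special_tokens=True))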

src/transformers/integrations/ggml.py
Lines changed: 25 additions & 0 deletions

@@ -235,6 +235,19 @@
         "output.weight": "lm_head.weight",
         "output_norm": "model.norm",
     },
+    "mamba": {
+        "token_embd": "backbone.embeddings",
+        "blk": "backbone.layers",
+        "ssm_a": "mixer.A_log",
+        "ssm_conv1d": "mixer.conv1d",
+        "ssm_in": "mixer.in_proj",
+        "ssm_out": "mixer.out_proj",
+        "ssm_x": "mixer.x_proj",
+        "ssm_dt": "mixer.dt_proj",
+        "attn_norm": "norm",
+        "output_norm": "backbone.norm_f",
+        "output.weight": "lm_head.weight",
+    },
 }
 
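To make the effect of this mapping concrete, here is a minimal sketch of how a GGUF tensor name maps onto a transformers Mamba state-dict key. The helper `rename_mamba_tensor` and the plain substring loop are illustrative only; the real lookup lives in modeling_gguf_pytorch_utils and also handles the per-block `blk.{bid}` formatting:

    GGUF_MAMBA_TENSOR_MAPPING = {
        "token_embd": "backbone.embeddings",
        "blk": "backbone.layers",
        "ssm_a": "mixer.A_log",
        "ssm_conv1d": "mixer.conv1d",
        "ssm_in": "mixer.in_proj",
        "ssm_out": "mixer.out_proj",
        "ssm_x": "mixer.x_proj",
        "ssm_dt": "mixer.dt_proj",
        "attn_norm": "norm",
        "output_norm": "backbone.norm_f",
        "output.weight": "lm_head.weight",
    }

    def rename_mamba_tensor(gguf_name: str) -> str:
        # Substring replacement in mapping order, mirroring the spirit of the real lookup.
        name = gguf_name
        for gguf_key, hf_key in GGUF_MAMBA_TENSOR_MAPPING.items():
            if gguf_key in name:
                name = name.replace(gguf_key, hf_key)
        return name

    print(rename_mamba_tensor("blk.0.ssm_in.weight"))   # backbone.layers.0.mixer.in_proj.weight
    print(rename_mamba_tensor("output_norm.weight"))    # backbone.norm_f.weight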

@@ -373,6 +386,17 @@
         "attention.head_count_kv": "num_key_value_heads",
         "attention.layer_norm_epsilon": "norm_epsilon",
     },
+    "mamba": {
+        "vocab_size": "vocab_size",
+        "context_length": "max_position_embeddings",
+        "embedding_length": "hidden_size",
+        "attention.layer_norm_rms_epsilon": "layer_norm_epsilon",
+        "block_count": "num_hidden_layers",
+        "ssm.conv_kernel": "conv_kernel",
+        "ssm.state_size": "state_size",
+        "ssm.time_step_rank": "time_step_rank",
+        "ssm.inner_size": "intermediate_size",
+    },
 }
 
 GGUF_TOKENIZER_MAPPING = {
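As an illustration of how these pairs are consumed, a small sketch that turns GGUF metadata into the keyword arguments used to populate a MambaConfig. The metadata values below are illustrative placeholders, not read from an actual file:

    # Illustrative GGUF metadata, as it would appear under the model's key prefix in the header.
    gguf_metadata = {
        "vocab_size": 50280,
        "context_length": 1048576,
        "embedding_length": 2560,
        "attention.layer_norm_rms_epsilon": 1e-5,
        "block_count": 64,
        "ssm.conv_kernel": 4,
        "ssm.state_size": 16,
        "ssm.time_step_rank": 160,
        "ssm.inner_size": 5120,
    }

    # Same pairs as the "mamba" entry added above.
    gguf_to_hf_config = {
        "vocab_size": "vocab_size",
        "context_length": "max_position_embeddings",
        "embedding_length": "hidden_size",
        "attention.layer_norm_rms_epsilon": "layer_norm_epsilon",
        "block_count": "num_hidden_layers",
        "ssm.conv_kernel": "conv_kernel",
        "ssm.state_size": "state_size",
        "ssm.time_step_rank": "time_step_rank",
        "ssm.inner_size": "intermediate_size",
    }

    config_kwargs = {gguf_to_hf_config[key]: value for key, value in gguf_metadata.items()}
    # config_kwargs would then be used to build the model config, e.g. MambaConfig(**config_kwargs).
    print(config_kwargs["hidden_size"], config_kwargs["num_hidden_layers"])   # 2560 64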
@@ -768,6 +792,7 @@ def converted(self) -> Tokenizer:
     "gpt2": GGUFGPTConverter,
     "starcoder2": GGUFGPTConverter,
     "t5": GGUFT5Converter,
+    "mamba": GGUFGPTConverter,
 }
 
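Because the "mamba" entry reuses GGUFGPTConverter, the tokenizer embedded in a Mamba GGUF file is rebuilt the same way as for gpt2 or starcoder2. A small sketch, with the repo and filename taken from the new tests in this commit:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "jpodivin/mamba-2.8b-hf-GGUF", gguf_file="ggml-model-Q6_K.gguf"
    )
    ids = tokenizer("Hello")["input_ids"]
    print(ids, tokenizer.decode(ids))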

src/transformers/modeling_gguf_pytorch_utils.py
Lines changed: 13 additions & 0 deletions

@@ -220,6 +220,19 @@ def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False):
                     name = "lm_head.weight"
                     parsed_parameters["tensors"][name] = torch.from_numpy(np.copy(weights))
                     continue
+            if architecture == "mamba":
+                if "ssm_d" in name and "bias" not in name and "weight" not in name:
+                    # ssm_d has conflicts with ssm_dt in name checking
+                    # we have to explicitly check that name is exactly ssm_d
+                    name = name.replace("ssm_d", "mixer.D")
+                if "ssm_conv1d.weight" in name:
+                    # for compatibility tensor ssm_conv1d must be (5120, 1, 4) dim,
+                    # quantized one is (5120, 4)
+                    weights = np.expand_dims(weights, axis=1)
+                if "ssm_a" in name:
+                    # Original exponential implementation
+                    # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L2975-L2977
+                    weights = np.log(-weights)
 
             for tensor_name in tensor_key_mapping:
                 if tensor_name.format(bid=bid) in name:
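A short numpy sketch of the two tensor transforms above; the (5120, 4) shape matches the comment in the diff, and the A_log values are illustrative. The llama.cpp converter stores A as -exp(A_log), so the loader recovers A_log with log(-weights), and the 2-D conv1d weight is expanded back to the 3-D layout expected by transformers' Mamba:

    import numpy as np

    # Round-trip for the state matrix: the GGUF file contains A = -exp(A_log).
    A_log = np.log(np.arange(1, 17, dtype=np.float32))[None, :].repeat(5120, axis=0)
    A_gguf = -np.exp(A_log)              # what convert_hf_to_gguf.py writes
    A_log_recovered = np.log(-A_gguf)    # what load_gguf_checkpoint reconstructs
    assert np.allclose(A_log, A_log_recovered, atol=1e-3)

    # conv1d weight: (5120, 4) in the GGUF file -> (5120, 1, 4) for transformers.
    conv_w = np.zeros((5120, 4), dtype=np.float32)
    print(np.expand_dims(conv_w, axis=1).shape)   # (5120, 1, 4)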

tests/quantization/ggml/test_ggml.py
Lines changed: 54 additions & 2 deletions

@@ -59,6 +59,8 @@ class GgufIntegrationTests(unittest.TestCase):
     starcoder2_model_id = "QuantFactory/starcoder2-3b-GGUF"
     starcoder2_fp16_model_id = "brittlewis12/starcoder2-3b-GGUF"
     starcoder2_original_model_id = "bigcode/starcoder2-3b"
+    mamba_original_model_id = "state-spaces/mamba-2.8b-hf"
+    mamba_model_id = "jpodivin/mamba-2.8b-hf-GGUF"
 
     # standard quants
     q4_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf"
@@ -102,6 +104,8 @@ class GgufIntegrationTests(unittest.TestCase):
     q6_k_gpt2_xl_model_id = "gpt2-xl.Q6_K.gguf"
     q6_k_starcoder2_model_id = "starcoder2-3b.Q6_K.gguf"
     fp16_starcoder2_gguf_model_id = "starcoder2-3b.fp16.gguf"
+    q6_k_mamba_model_id = "ggml-model-Q6_K.gguf"
+    fp16_mamba_model_id = "ggml-model-f16.gguf"
 
     example_text = "Hello"
 
@@ -573,6 +577,8 @@ def test_gpt2_weights_conversion_fp16(self):
             if layer_name in quantized_state_dict:
                 self.assertTrue(original_params.shape == quantized_state_dict[layer_name].shape)
                 torch.testing.assert_close(original_params, quantized_state_dict[layer_name])
+            else:
+                raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
 
     def test_gpt2_xl_Q6_K(self):
         tokenizer = AutoTokenizer.from_pretrained(self.gpt2_xl_model_id, gguf_file=self.q6_k_gpt2_xl_model_id)
@@ -639,6 +645,8 @@ def test_falcon7b_weights_conversion_fp16(self):
             if layer_name in quantized_state_dict:
                 self.assertTrue(original_params.shape == quantized_state_dict[layer_name].shape)
                 torch.testing.assert_close(original_params, quantized_state_dict[layer_name])
+            else:
+                raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
 
     def test_stablelm_q4_k_m(self):
         model = AutoModelForCausalLM.from_pretrained(
@@ -708,6 +716,8 @@ def test_stablelm_weights_conversion_fp16(self):
             if layer_name in converted_state_dict:
                 self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
                 torch.testing.assert_close(original_params, converted_state_dict[layer_name])
+            else:
+                raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
 
     def test_starcoder2_weights_conversion_fp16(self):
         original_model = AutoModelForCausalLM.from_pretrained(
@@ -727,10 +737,11 @@ def test_starcoder2_weights_conversion_fp16(self):
         original_state_dict = original_model.state_dict()
 
         for layer_name, original_params in original_state_dict.items():
-            if layer_name in converted_state_dict and layer_name != "lm_head.weight":
-                # quantized models do not contain "lm_head.weight" layer
+            if layer_name in converted_state_dict:
                 self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
                 torch.testing.assert_close(original_params, converted_state_dict[layer_name])
+            else:
+                raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
 
     def test_starcoder2_q6_k(self):
         example_function_text = "def print_hello_world():"
@@ -748,6 +759,47 @@ def test_starcoder2_q6_k(self):
         EXPECTED_TEXT = 'def print_hello_world():\n    print("Hello World")\n\ndef print'
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
 
+    def test_mamba_weights_conversion_fp16(self):
+        original_model = AutoModelForCausalLM.from_pretrained(
+            self.mamba_original_model_id,
+            torch_dtype=torch.float16,
+        )
+
+        converted_model = AutoModelForCausalLM.from_pretrained(
+            self.mamba_model_id,
+            gguf_file=self.fp16_mamba_model_id,
+            torch_dtype=torch.float16,
+        )
+
+        converted_state_dict = converted_model.state_dict()
+        original_state_dict = original_model.state_dict()
+
+        for layer_name, original_params in original_state_dict.items():
+            if layer_name in converted_state_dict:
+                self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
+                if "mixer.A_log" in layer_name:
+                    # we should increase tolerance after exponential reversing
+                    # and performing np.log(-weights) operation as numbers are slightly different
+                    torch.testing.assert_close(original_params, converted_state_dict[layer_name], atol=1e-3, rtol=1e-3)
+                else:
+                    torch.testing.assert_close(original_params, converted_state_dict[layer_name])
+            else:
+                raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
+
+    def test_mamba_q6_k(self):
+        model = AutoModelForCausalLM.from_pretrained(
+            self.mamba_model_id,
+            gguf_file=self.q6_k_mamba_model_id,
+            torch_dtype=torch.float16,
+        )
+
+        tokenizer = AutoTokenizer.from_pretrained(self.mamba_model_id, gguf_file=self.q6_k_mamba_model_id)
+        text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
+        out = model.generate(text, max_new_tokens=10)
+
+        EXPECTED_TEXT = "Hello,I answerthe question.\n\nA"
+        self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
+
     def test_tokenization_xnli(self):
         import tqdm
         from datasets import load_dataset
