
Commit 1d0125b

gabe-l-hart and CISC authored
feat: Add conversion support in GraniteHybrid for non-hybrid (all attn) (ggml-org#16177)
This is a configuration of the hparams in the GraniteHybrid architecture that devolves to the Granite (or GraniteMoe) architecture (i.e. Granite 3.x). It may be used for some models in the Granite 4 family, with the GraniteHybrid architecture acting as a superset arch. Rather than supporting it directly in the C++ graph, we simply coerce the architecture flag back to the correct "granite" or "granitemoe" architecture.

Branch: gabe-l-hart/GraniteNonHybridConversion

Signed-off-by: Gabe Goodhart <[email protected]>
Co-authored-by: Sigbjørn Skjæret <[email protected]>
1 parent 351f3da commit 1d0125b
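
The devolution rule described in the commit message is easy to state on its own. Below is a minimal, self-contained sketch of it; the function name effective_arch and the plain-string architecture labels are illustrative stand-ins for the gguf.MODEL_ARCH enum values that the actual converter uses (see the diff below):

    # Illustrative sketch of the coercion rule; the function name and the
    # plain-string arch labels are hypothetical stand-ins for the
    # gguf.MODEL_ARCH enum values in the real converter.
    def effective_arch(ssm_layers: list, has_experts: bool) -> str:
        # A GraniteHybrid config with no SSM (mamba) layers is "all
        # attention", i.e. effectively a Granite 3.x model, so the
        # architecture is coerced back to the standard one.
        if not ssm_layers:
            return "granitemoe" if has_experts else "granite"
        return "granitehybrid"

    assert effective_arch([], has_experts=False) == "granite"
    assert effective_arch([], has_experts=True) == "granitemoe"
    assert effective_arch([2, 5, 8], has_experts=True) == "granitehybrid"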


convert_hf_to_gguf.py

Lines changed: 20 additions & 2 deletions
@@ -7656,6 +7656,21 @@ def __init__(self, *args, **kwargs):
             if i not in self._attn_layers
         ]
 
+        # There are some models in this family that are non-hybrid, but keep the
+        # same parent class by setting all layers to "attention." If this is the
+        # case, the model architecture needs to be updated to a standard
+        # "granite" or "granitemoe" model
+        if not self._ssm_layers:
+            has_experts = self.find_hparam(["num_experts_per_tok"], optional=True)
+            new_arch = (
+                gguf.MODEL_ARCH.GRANITE_MOE
+                if has_experts else
+                gguf.MODEL_ARCH.GRANITE
+            )
+            self.model_arch = new_arch
+            self.gguf_writer.arch = gguf.MODEL_ARCH_NAMES[new_arch]
+            self.gguf_writer.add_architecture()
+
         # n_group and d_inner are used during reshape_tensors for mamba2
         # NOTE: Explicitly include hparam prefix prefix for d_model to
         #       disambiguate with top-level head_dim
@@ -7740,8 +7755,11 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_rope_dimension_count(rope_dim)
         self.gguf_writer.add_head_count_kv(head_count_kv_vec)
 
-        ## If Bamba, use rope, otherwise don't
-        use_rope = "BambaForCausalLM" in self.hparams["architectures"]
+        ## If Bamba or non-hybrid, use rope, otherwise don't
+        use_rope = (
+            "BambaForCausalLM" in self.hparams["architectures"]
+            or not self._ssm_layers
+        )
         self.gguf_writer.add_rope_scaling_finetuned(use_rope)
         if not use_rope:
             self.gguf_writer.add_context_length(2**20)
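
For reference, the use_rope decision in the second hunk can be exercised in isolation. This is a sketch under assumed inputs: the hparams dict and the layer lists are fabricated examples, and decide_use_rope is not a function in the converter:

    # Standalone sketch of the use_rope decision above; inputs are made up.
    def decide_use_rope(hparams: dict, ssm_layers: list) -> bool:
        # Bamba models always use RoPE; so do non-hybrid (all-attention)
        # models, since an empty SSM-layer list means every layer is attention.
        return (
            "BambaForCausalLM" in hparams["architectures"]
            or not ssm_layers
        )

    # Non-hybrid: no SSM layers, so RoPE is enabled.
    print(decide_use_rope({"architectures": ["GraniteMoeHybridForCausalLM"]}, []))      # True
    # Truly hybrid: RoPE stays off, and the converter instead writes a fixed
    # context length of 2**20 tokens.
    print(decide_use_rope({"architectures": ["GraniteMoeHybridForCausalLM"]}, [1, 3]))  # False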
