
Commit 2745373

Merge branch 'master' into xsn/xiaomi_mimo
2 parents ff17782 + 36667c8 commit 2745373

6 files changed: +46 additions, -17 deletions

convert_hf_to_gguf.py

Lines changed: 7 additions & 1 deletion
@@ -2124,6 +2124,9 @@ def __init__(self, *args, **kwargs):
         # if n_heads_in_group is not None, then
         # _num_kv_heads[il] is num_attention_head // n_heads_in_group and
         # _num_heads[il] is num_attention_head
+        # ***dummy layer*** for nemotron 253B
+        # if n_heads_in_group is None and ffn_mult is None
+        # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
         for il in range(len(_block_configs)):
             if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                 if _block_configs[il]["attention"]["replace_with_linear"] is True:
@@ -2135,7 +2138,10 @@ def __init__(self, *args, **kwargs):
             else:
                 self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                 self._num_heads.append(self.hparams["num_attention_heads"])
-            _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
+            if _block_configs[il]["ffn"]["ffn_mult"] is None:  # dummy layer
+                _ffn_multipliers.append(0.0)
+            else:
+                _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
         assert self.block_count == len(self._num_kv_heads)
         assert self.block_count == len(self._num_heads)
         assert self.block_count == len(_ffn_multipliers)

src/llama-model.cpp

Lines changed: 16 additions & 4 deletions
@@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) {
         case LLM_TYPE_236B: return "236B";
         case LLM_TYPE_290B: return "290B";
         case LLM_TYPE_314B: return "314B";
+        case LLM_TYPE_405B: return "405B";
         case LLM_TYPE_671B: return "671B";
         case LLM_TYPE_SMALL: return "0.1B";
         case LLM_TYPE_MEDIUM: return "0.4B";
@@ -582,6 +583,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 switch (hparams.n_layer) {
                     case 32: type = LLM_TYPE_7B; break;
                     case 80: type = LLM_TYPE_70B; break;
+                    case 162: type = LLM_TYPE_405B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
@@ -1848,7 +1850,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                     layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

-                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    }

                     if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                         layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
@@ -1858,9 +1862,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                     }

-                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
-                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    }

                     // optional MLP bias
                     layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
@@ -4711,6 +4717,7 @@ struct llm_build_deci : public llm_graph_context {
            ggml_tensor * inpSA = inpL;
            const int64_t n_head_kv = hparams.n_head_kv(il);
            const int64_t n_head = hparams.n_head(il);
+            const int64_t n_ff = hparams.n_ff(il);

            if (n_head == 0) {
                // attention-free layer of Llama-3_1-Nemotron-51B
@@ -4786,6 +4793,11 @@ struct llm_build_deci : public llm_graph_context {
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

+            // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
+            if (n_head == 0 && n_ff == 0) {
+                continue;
+            }
+
            // For Granite architecture
            if (hparams.f_residual_scale) {
                cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
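
A rough standalone sketch (Python, not llama.cpp code) of the per-layer dispatch that llm_build_deci now performs; the head and FFN sizes are made-up illustrative values. The point is that heterogeneous Nemotron layers are classified from the per-layer hparams, and a layer with neither attention heads nor an FFN (the 253B dummy layer) is skipped in the graph build:

layers = [
    {"n_head": 128, "n_ff": 53248},  # regular attention + FFN layer
    {"n_head": 0,   "n_ff": 53248},  # attention-free layer (Llama-3_1-Nemotron-51B)
    {"n_head": 0,   "n_ff": 0},      # dummy layer (Llama-3_1-Nemotron-Ultra-253B)
]

for il, layer in enumerate(layers):
    if layer["n_head"] == 0 and layer["n_ff"] == 0:
        # mirrors the new `continue` above: nothing to compute for this layer
        print(f"layer {il}: dummy layer -> skipped")
        continue
    if layer["n_head"] == 0:
        print(f"layer {il}: attention-free -> FFN only")
    else:
        print(f"layer {il}: attention ({layer['n_head']} heads) + FFN (n_ff={layer['n_ff']})")

This matches the loader change above: when n_ff == 0, no ffn_norm/gate/down/up tensors are created for that layer, so the graph must not reference them.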

src/llama-model.h

Lines changed: 1 addition & 0 deletions
@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_236B,
     LLM_TYPE_290B,
     LLM_TYPE_314B,
+    LLM_TYPE_405B,
     LLM_TYPE_671B,
     LLM_TYPE_SMALL,
     LLM_TYPE_MEDIUM,

tools/llava/clip-impl.h

Lines changed: 2 additions & 0 deletions
@@ -75,6 +75,8 @@
 #define TN_MM_PROJECTOR "mm.model.fc.weight" // idefics3
 #define TN_MM_PATCH_MERGER "mm.patch_merger.weight" // mistral small 3.1
 #define TN_TOK_IMG_BREAK "v.token_embd.img_break" // pixtral
+#define TN_TOK_GLM_BOI "adapter.boi" // glm-edge (these embeddings are not in text model)
+#define TN_TOK_GLM_EOI "adapter.eoi" // glm-edge (these embeddings are not in text model)

 // mimicpmv
 #define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"

tools/llava/clip.cpp

Lines changed: 19 additions & 7 deletions
@@ -249,9 +249,11 @@ struct clip_vision_model {
     struct ggml_tensor * mm_4_w = nullptr;
     struct ggml_tensor * mm_4_b = nullptr;

-    //GLMV-Edge projection
+    // GLMV-Edge projection
     struct ggml_tensor * mm_model_adapter_conv_w = nullptr;
     struct ggml_tensor * mm_model_adapter_conv_b = nullptr;
+    struct ggml_tensor * mm_glm_tok_boi = nullptr;
+    struct ggml_tensor * mm_glm_tok_eoi = nullptr;

     // MobileVLM projection
     struct ggml_tensor * mm_model_mlp_1_w = nullptr;
@@ -1559,6 +1561,13 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
             embeddings = ggml_mul(ctx0, embeddings,x);
             embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
         }
+        // arrangement of BOI/EOI token embeddings
+        // note: these embeddings are not present in text model, hence we cannot process them as text tokens
+        // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
+        {
+            embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
+            embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
+        }
     }

     else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
@@ -1972,12 +1981,14 @@ struct clip_model_loader {
                 {
                     vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                     vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
-                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,"weight"));
-                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"weight"));
-                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"bias"));
-                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
-                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE,"weight"));
-                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
+                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
+                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
+                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
+                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
+                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
+                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
+                    vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
+                    vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                 } break;
             case PROJECTOR_TYPE_QWEN2VL:
             case PROJECTOR_TYPE_QWEN25VL:
@@ -2948,6 +2959,7 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im

     if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
         n_patches /= 4;
+        n_patches += 2; // for BOI and EOI token embeddings
     } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
         if (ctx->minicpmv_version == 2) {
             n_patches = 96;
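
As a back-of-the-envelope check of the clip_n_output_tokens change, here is a small Python sketch; image_size and patch_size are illustrative values, not read from any checkpoint:

image_size = 672
patch_size = 14

n_patches = (image_size // patch_size) ** 2  # 48 * 48 = 2304 vision patches
n_patches //= 4                              # GLM-Edge projector reduces the patch count by 4 (n_patches /= 4 above)
n_patches += 2                               # plus the BOI and EOI embeddings concatenated in the graph

print(n_patches)  # 578

The += 2 keeps the token count reported to the caller consistent with the two ggml_concat calls that prepend mm_glm_tok_boi and append mm_glm_tok_eoi to the projected embeddings.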

tools/llava/mtmd.cpp

Lines changed: 1 addition & 5 deletions
@@ -189,11 +189,6 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
         marker_modified = "<start_of_image>" + ctx->image_marker + "<end_of_image>";
         string_replace_all(prompt_modified, ctx->image_marker, marker_modified);

-    } else if (proj_type == PROJECTOR_TYPE_GLM_EDGE) {
-        // <|begin_of_image|> ... (image embeddings) ... <|end_of_image|>
-        marker_modified = "<|begin_of_image|>" + ctx->image_marker + "<|end_of_image|>";
-        string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
-
     } else if (proj_type == PROJECTOR_TYPE_IDEFICS3) {
         // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
         marker_modified = "<fake_token_around_image><global-img>" + ctx->image_marker + "<fake_token_around_image>";
@@ -213,6 +208,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
     }

     // llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix
+    // for glm-edge, BOI and EOI token's embeddings are not present in the text model

     std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);
     output.clear();
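
A simplified Python sketch (not the mtmd API) of the image-marker handling after this change; the projector names and the marker string are illustrative. glm-edge now falls through to the default path: the marker is left untouched and the BOI/EOI embeddings come from the CLIP graph instead of being injected as text tokens:

def expand_marker(prompt: str, marker: str, proj_type: str) -> str:
    if proj_type == "idefics3":
        # wrap the marker with projector-specific text tokens, as in the hunk above
        return prompt.replace(marker, "<fake_token_around_image><global-img>" + marker + "<fake_token_around_image>")
    # glm-edge (like llava-1.5/1.6, Yi-VL, granite): no prefix/suffix around the marker
    return prompt

prompt = expand_marker("describe this image: <__image__>", "<__image__>", "glm-edge")
parts = prompt.split("<__image__>")   # text chunks on either side of the image slot
print(parts)  # ['describe this image: ', '']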
