
Commit a342f52

correct projector
1 parent 03a4a49 commit a342f52

File tree

5 files changed: +35 -67 lines changed

    convert_hf_to_gguf.py
    gguf-py/gguf/constants.py
    gguf-py/gguf/tensor_mapping.py
    tools/mtmd/clip.cpp
    tools/mtmd/mtmd-cli.cpp

convert_hf_to_gguf.py

Lines changed: 3 additions & 9 deletions

@@ -3402,15 +3402,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         if "vision_model" in name or "mlp_AR" in name:
             if "packing_position_embedding" in name:
                 return [] # unused
-            elif "head.attention.in_proj_" in name:
-                chunks = data_torch.chunk(3, dim=0)
-                name = name.replace("_bias", ".bias")
-                name = name.replace("_weight", ".weight")
-                return [
-                    (self.map_tensor_name(name.replace("in_proj", "in_proj_q")), chunks[0]),
-                    (self.map_tensor_name(name.replace("in_proj", "in_proj_k")), chunks[1]),
-                    (self.map_tensor_name(name.replace("in_proj", "in_proj_v")), chunks[2]),
-                ]
+            elif "vision_model.head" in name:
+                # we don't yet support image embeddings for this model
+                return []
             else:
                 return [(self.map_tensor_name(name), data_torch)]
         return [] # skip other tensors
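The conversion now drops the whole SigLIP pooling head ("vision_model.head") rather than splitting its fused attention projection, since image embeddings for this model are not yet supported. For reference, the removed branch split the fused in_proj tensor into Q/K/V along dim 0; a minimal standalone torch sketch of that chunking (the embedding width here is hypothetical):

    import torch

    n_embd = 1152                                     # hypothetical vision embedding width
    in_proj_weight = torch.randn(3 * n_embd, n_embd)  # fused QKV projection, stacked along dim 0
    q_w, k_w, v_w = in_proj_weight.chunk(3, dim=0)    # three (n_embd, n_embd) slices
    assert q_w.shape == (n_embd, n_embd)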

gguf-py/gguf/constants.py

Lines changed: 0 additions & 4 deletions

@@ -640,8 +640,6 @@ class MODEL_TENSOR(IntEnum):
     V_RESMPL_Q_NORM = auto() # minicpmv
     V_RESMPL_PROJ = auto() # minicpmv
     V_RESMPL_QUERY = auto() # minicpmv
-    V_RESMPL_FFN_UP = auto() # PaddleOCR-VL
-    V_RESMPL_FFN_DOWN = auto() # PaddleOCR-VL
     V_TOK_EMBD_IMG_BREAK = auto() # pixtral
     V_MM_PATCH_MERGER = auto() # mistral small 3.1
     # audio (mtmd)

@@ -988,8 +986,6 @@ class MODEL_TENSOR(IntEnum):
     MODEL_TENSOR.V_RESMPL_Q_NORM: "resampler.ln_q",
     MODEL_TENSOR.V_RESMPL_PROJ: "resampler.proj",
     MODEL_TENSOR.V_RESMPL_QUERY: "resampler.query",
-    MODEL_TENSOR.V_RESMPL_FFN_UP: "resampler.ffn_up",
-    MODEL_TENSOR.V_RESMPL_FFN_DOWN: "resampler.ffn_down",
     MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: "v.token_embd.img_break", # pixtral
     MODEL_TENSOR.V_MM_PATCH_MERGER: "mm.patch_merger", # mistral small 3.1
     # audio (mtmd)

gguf-py/gguf/tensor_mapping.py

Lines changed: 0 additions & 13 deletions

@@ -1352,17 +1352,14 @@ class TensorNameMap:
 
         MODEL_TENSOR.V_RESMPL_ATTN_Q: (
             "resampler.attn.in_proj_q", # tensor generated from resampler.attn.in_proj
-            "model.vision_model.head.attention.in_proj_q", # PaddleOCR-VL (generated tensor)
         ),
 
         MODEL_TENSOR.V_RESMPL_ATTN_K: (
             "resampler.attn.in_proj_k", # tensor generated from resampler.attn.in_proj
-            "model.vision_model.head.attention.in_proj_k", # PaddleOCR-VL (generated tensor)
         ),
 
         MODEL_TENSOR.V_RESMPL_ATTN_V: (
             "resampler.attn.in_proj_v", # tensor generated from resampler.attn.in_proj
-            "model.vision_model.head.attention.in_proj_v", # PaddleOCR-VL (generated tensor)
         ),
 
         MODEL_TENSOR.V_RESMPL_ATTN_OUT: (

@@ -1376,7 +1373,6 @@ class TensorNameMap:
 
         MODEL_TENSOR.V_RESMPL_POST_NORM: (
             "resampler.ln_post",
-            "model.vision_model.head.layernorm", # PaddleOCR-VL
         ),
 
         MODEL_TENSOR.V_RESMPL_KV_NORM: (

@@ -1393,15 +1389,6 @@ class TensorNameMap:
 
         MODEL_TENSOR.V_RESMPL_QUERY: (
             "resampler.query",
-            "model.vision_model.head.probe", # PaddleOCR-VL
-        ),
-
-        MODEL_TENSOR.V_RESMPL_FFN_UP: (
-            "model.vision_model.head.mlp.fc1", # PaddleOCR-VL
-        ),
-
-        MODEL_TENSOR.V_RESMPL_FFN_DOWN: (
-            "model.vision_model.head.mlp.fc2", # PaddleOCR-VL
         ),
 
         MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: (

tools/mtmd/clip.cpp

Lines changed: 32 additions & 40 deletions

@@ -342,10 +342,6 @@ struct clip_model {
     ggml_tensor * mm_model_ln_kv_b = nullptr;
     ggml_tensor * mm_model_ln_post_w = nullptr;
     ggml_tensor * mm_model_ln_post_b = nullptr;
-    ggml_tensor * mm_model_ffn_up_w = nullptr;
-    ggml_tensor * mm_model_ffn_up_b = nullptr;
-    ggml_tensor * mm_model_ffn_down_w = nullptr;
-    ggml_tensor * mm_model_ffn_down_b = nullptr;
 
     // gemma3
     ggml_tensor * mm_input_proj_w = nullptr;
@@ -1169,38 +1165,33 @@ struct clip_graph {
     cb(cur, "vit_out", -1);
 
     {
-        // SiglipMultiheadAttentionPoolingHead
-        int64_t n_pos = cur->ne[1];
-        ggml_tensor * Qcur = ggml_repeat(ctx0, model.mm_model_query, cur);
-        ggml_tensor * Kcur = cur;
-        ggml_tensor * Vcur = cur;
-
-        Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
-        Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
-        Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
-
-        cb(Qcur, "resampl_Qcur", -1);
-        cb(Kcur, "resampl_Kcur", -1);
-        cb(Vcur, "resampl_Vcur", -1);
-
-        float kq_scale = 1.0f / sqrtf((float)(d_head));
-        cur = build_attn(model.mm_model_attn_o_w, model.mm_model_attn_o_b,
-                Qcur, Kcur, Vcur, nullptr, kq_scale, -1);
-
-        cb(cur, "resampl_attn_out", -1);
-
-        cur = build_norm(cur, model.mm_model_ln_post_w, model.mm_model_ln_post_b,
-                NORM_TYPE_NORMAL, eps, -1);
-
-        cb(cur, "resampl_out", -1);
-    }
+        // mlp_AR
+        float proj_norm_eps = 1e-5; // PaddleOCR uses hard-coded value eps=1e-5 for Projector
+        cur = build_norm(cur,
+                model.mm_input_norm_w, model.mm_input_norm_b,
+                NORM_TYPE_NORMAL, proj_norm_eps, -1);
+        //cur = build_patch_merge_permute(cur, hparams.proj_scale_factor);
+
+        // stack and padding
+        int64_t stride = hparams.proj_scale_factor * hparams.proj_scale_factor;
+        int64_t n_embd = cur->ne[0];
+        int64_t n_tokens = cur->ne[1];
+        int64_t n_tokens_padded = CLIP_ALIGN(n_tokens, stride);
+        int64_t n_pad = n_tokens_padded - n_tokens;
+        if (n_pad > 0) {
+            cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
+            cur = ggml_pad(ctx0, cur, n_pad * n_embd, 0, 0, 0);
+        }
+        cur = ggml_view_2d(ctx0, cur,
+                n_embd * stride,
+                n_tokens_padded / stride,
+                ggml_row_size(cur->type, n_embd * stride), 0);
+        cb(cur, "after_stacked", -1);
 
-    {
-        // SiglipMLP
         cur = build_ffn(cur,
-                model.mm_model_ffn_up_w, model.mm_model_ffn_up_b,
+                model.mm_1_w, model.mm_1_b,
                 nullptr, nullptr,
-                model.mm_model_ffn_down_w, model.mm_model_ffn_down_b,
+                model.mm_2_w, model.mm_2_b,
                 hparams.ffn_op, -1);
         cb(cur, "mlp_out", -1);
     }
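The new mlp_AR path replaces the attention-pooling resampler entirely: after a layernorm with PaddleOCR's hard-coded eps=1e-5, the token sequence is padded up to a multiple of stride = proj_scale_factor * proj_scale_factor, and each group of stride consecutive patch embeddings is stacked into one wider row before the two-layer projector FFN. A minimal numpy sketch of the stack-and-pad step, assuming a row-major (n_tokens, n_embd) layout (shapes hypothetical):

    import numpy as np

    def stack_and_pad(x: np.ndarray, scale_factor: int = 2) -> np.ndarray:
        n_tokens, n_embd = x.shape
        stride = scale_factor * scale_factor
        n_tokens_padded = -(-n_tokens // stride) * stride   # round up, like CLIP_ALIGN
        pad = np.zeros((n_tokens_padded - n_tokens, n_embd), dtype=x.dtype)
        x = np.concatenate([x, pad], axis=0)                # zero-pad the tail tokens
        # merge each group of `stride` consecutive tokens into one row
        return x.reshape(n_tokens_padded // stride, n_embd * stride)

    out = stack_and_pad(np.ones((1034, 1152), dtype=np.float32))
    print(out.shape)  # (259, 4608): 1034 tokens padded to 1036, stacked 4 at a time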
@@ -2521,7 +2512,7 @@ struct clip_model_loader {
         } break;
     case PROJECTOR_TYPE_PADDLEOCR:
         {
-            hparams.proj_scale_factor = 1;
+            hparams.proj_scale_factor = 2;
         } break;
     default:
         break;
@@ -2862,11 +2853,6 @@ struct clip_model_loader {
     model.mm_model_attn_o_b = get_tensor(string_format(TN_RESAMPL_ATTN, "out", "bias"));
     model.mm_model_ln_post_w = get_tensor(string_format(TN_RESAMPL_LN, "post", "weight"));
     model.mm_model_ln_post_b = get_tensor(string_format(TN_RESAMPL_LN, "post", "bias"));
-    // resampler ffn
-    model.mm_model_ffn_up_w = get_tensor(string_format(TN_RESAMPL_FFN_UP, "weight"));
-    model.mm_model_ffn_up_b = get_tensor(string_format(TN_RESAMPL_FFN_UP, "bias"));
-    model.mm_model_ffn_down_w = get_tensor(string_format(TN_RESAMPL_FFN_DOWN, "weight"));
-    model.mm_model_ffn_down_b = get_tensor(string_format(TN_RESAMPL_FFN_DOWN, "bias"));
     // projector ffn
     model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
     model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
@@ -3967,7 +3953,6 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img)
         } break;
     case PROJECTOR_TYPE_LFM2:
     case PROJECTOR_TYPE_KIMIVL:
-    case PROJECTOR_TYPE_PADDLEOCR:
         {
             // dynamic size
             int scale_factor = ctx->model.hparams.proj_scale_factor;

@@ -3976,6 +3961,13 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img)
             int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
             n_patches = x_patch * y_patch;
         } break;
+    case PROJECTOR_TYPE_PADDLEOCR:
+        {
+            // dynamic size
+            int scale_factor = ctx->model.hparams.proj_scale_factor;
+            int stride = scale_factor * scale_factor;
+            n_patches = CLIP_ALIGN(n_patches, stride) / stride;
+        } break;
     case PROJECTOR_TYPE_PIXTRAL:
         {
             // dynamic size
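With proj_scale_factor corrected from 1 to 2, PROJECTOR_TYPE_PADDLEOCR also gets its own clip_n_output_tokens case: the output count is the padded patch count divided by stride = scale_factor * scale_factor, instead of the x_patch * y_patch formula shared by LFM2 and KimiVL. A worked sketch of the arithmetic, assuming CLIP_ALIGN rounds up to the next multiple (the patch count is hypothetical):

    def clip_align(x: int, n: int) -> int:
        return ((x + n - 1) // n) * n       # round x up to a multiple of n

    scale_factor = 2
    stride = scale_factor * scale_factor    # 4 patches per projector output token
    n_patches = 1034                        # hypothetical ViT patch count for one image
    print(clip_align(n_patches, stride) // stride)  # 259 output tokens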

tools/mtmd/mtmd-cli.cpp

Lines changed: 0 additions & 1 deletion

@@ -87,7 +87,6 @@ struct mtmd_cli_context {
 
     int n_threads = 1;
     llama_pos n_past = 0;
-    bool use_jinja = false;
 
     mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
         model = llama_init.model.get();
