Commit e9ef96a

mtmd : clean up clip_n_output_tokens
1 parent f44f793 commit e9ef96a

3 files changed (+26, -40 lines)


tools/mtmd/clip.cpp

Lines changed: 25 additions & 35 deletions
@@ -3649,8 +3649,9 @@ int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 *
 int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
     const auto & params = ctx->model.hparams;
 
-    // only for models using fixed size square images
-    int n_patches_sq = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+    // for models with fixed size image, the input image is already pre-processed and resized to square
+    int patch_size = params.patch_size;
+    int n_patches = (img->nx / patch_size) * (img->ny / patch_size);
 
     projector_type proj = ctx->proj_type();
 
@@ -3664,27 +3665,27 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
         case PROJECTOR_TYPE_LDPV2:
         case PROJECTOR_TYPE_GLM_EDGE:
             {
-                n_patches_sq /= 4;
+                n_patches /= 4;
                 if (ctx->model.mm_glm_tok_boi) {
-                    n_patches_sq += 2; // for BOI and EOI token embeddings
+                    n_patches += 2; // for BOI and EOI token embeddings
                 }
             } break;
         case PROJECTOR_TYPE_MINICPMV:
             {
                 // Use actual config value if available, otherwise fall back to hardcoded values
                 if (params.minicpmv_query_num > 0) {
-                    n_patches_sq = params.minicpmv_query_num;
+                    n_patches = params.minicpmv_query_num;
                 } else {
                     // Fallback to hardcoded values for legacy models
                     if (params.minicpmv_version == 2) {
-                        n_patches_sq = 96;
+                        n_patches = 96;
                     } else if (params.minicpmv_version == 3) {
-                        n_patches_sq = 64;
+                        n_patches = 64;
                     } else if (params.minicpmv_version == 4) {
-                        n_patches_sq = 64;
+                        n_patches = 64;
                     } else if (params.minicpmv_version == 5) {
                         // MiniCPM-V 4.0
-                        n_patches_sq = 64;
+                        n_patches = 64;
                     } else {
                         GGML_ABORT("Unknown minicpmv version");
                     }
@@ -3693,67 +3694,56 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
         case PROJECTOR_TYPE_QWEN2VL:
         case PROJECTOR_TYPE_QWEN25VL:
             {
-                // dynamic size
+                // dynamic size (2 conv, so double patch size)
                 int patch_size = params.patch_size * 2;
                 int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
                 int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
-                n_patches_sq = x_patch * y_patch;
+                n_patches = x_patch * y_patch;
             } break;
         case PROJECTOR_TYPE_GEMMA3:
-            {
-                int n_per_side = params.image_size / params.patch_size;
-                int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
-                n_patches_sq = n_per_side_2d_pool * n_per_side_2d_pool;
-            } break;
         case PROJECTOR_TYPE_IDEFICS3:
         case PROJECTOR_TYPE_INTERNVL:
+        case PROJECTOR_TYPE_LLAMA4:
+        case PROJECTOR_TYPE_LFM2:
             {
                 // both W and H are divided by proj_scale_factor
-                n_patches_sq /= (params.proj_scale_factor * params.proj_scale_factor);
+                int scale_factor = ctx->model.hparams.proj_scale_factor;
+                n_patches /= (scale_factor * scale_factor);
             } break;
         case PROJECTOR_TYPE_PIXTRAL:
             {
                 // dynamic size
                 int n_merge = params.spatial_merge_size;
-                int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
-                int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
-                n_patches_sq = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
-            } break;
-        case PROJECTOR_TYPE_LLAMA4:
-            {
-                int scale_factor = ctx->model.hparams.proj_scale_factor;
-                n_patches_sq /= (scale_factor * scale_factor);
+                int n_patches_x = img->nx / patch_size / (n_merge > 0 ? n_merge : 1);
+                int n_patches_y = img->ny / patch_size / (n_merge > 0 ? n_merge : 1);
+                n_patches = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
             } break;
         case PROJECTOR_TYPE_VOXTRAL:
         case PROJECTOR_TYPE_ULTRAVOX:
         case PROJECTOR_TYPE_QWEN2A:
             {
-                n_patches_sq = img->nx;
+                n_patches = img->nx;
 
                 const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
                 if (ctx->model.audio_has_stack_frames()) {
                     GGML_ASSERT(proj_stack_factor > 0);
-                    const int n_len = CLIP_ALIGN(n_patches_sq, proj_stack_factor);
-                    n_patches_sq = n_len / proj_stack_factor;
+                    const int n_len = CLIP_ALIGN(n_patches, proj_stack_factor);
+                    n_patches = n_len / proj_stack_factor;
                 }
 
                 // whisper downscales input token by half after conv1d
-                n_patches_sq /= 2;
+                n_patches /= 2;
 
                 if (ctx->model.audio_has_avgpool()) {
                     // divide by 2 because of nn.AvgPool1d(2, stride=2)
-                    n_patches_sq /= 2;
+                    n_patches /= 2;
                 }
             } break;
-        case PROJECTOR_TYPE_LFM2:
-            {
-                n_patches_sq = (img->nx / (params.patch_size * params.proj_scale_factor)) * (img->ny / (params.patch_size * params.proj_scale_factor));
-            } break;
         default:
             GGML_ABORT("unsupported projector type");
     }
 
-    return n_patches_sq;
+    return n_patches;
 }
 
 static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
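
Note on the consolidation above: folding PROJECTOR_TYPE_LFM2 (and PROJECTOR_TYPE_LLAMA4) into the IDEFICS3/INTERNVL branch relies on the pre-processed image dimensions being multiples of patch_size * proj_scale_factor, so that (nx / (p*s)) * (ny / (p*s)) equals ((nx / p) * (ny / p)) / (s*s). The standalone sketch below only illustrates the shared computation; the helper name and the image dimensions are made up and not part of the commit.

// Illustrative sketch only (not from this commit): the base patch count is now
// derived from the pre-processed image size instead of params.image_size, and
// the scale-factor branch then divides it by scale_factor^2.
#include <cstdio>

static int n_base_patches(int nx, int ny, int patch_size) {
    // same shape as the new code path: (img->nx / patch_size) * (img->ny / patch_size)
    return (nx / patch_size) * (ny / patch_size);
}

int main() {
    // hypothetical 256x384 pre-processed image, 16x16 patches, proj_scale_factor = 2
    const int patch_size   = 16;
    const int scale_factor = 2;
    int n_patches = n_base_patches(256, 384, patch_size); // 16 * 24 = 384
    n_patches /= scale_factor * scale_factor;              // 384 / 4  = 96
    std::printf("output tokens: %d\n", n_patches);         // prints 96
    return 0;
}

With the same numbers, the removed LFM2-specific formula (nx / (patch_size * scale_factor)) * (ny / (patch_size * scale_factor)) gives 8 * 12 = 96 as well, which is why the dedicated branch could be dropped.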

tools/mtmd/clip.h

Lines changed: 0 additions & 5 deletions
@@ -82,11 +82,6 @@ struct clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch
  */
 void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, struct clip_image_u8 * img);
 
-bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
-
-/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
-bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
-
 /** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
 bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );
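
With clip_image_load_from_file and clip_image_load_from_bytes gone from the public header, decoding is left to the caller, which then hands raw RGB pixels to clip_build_img_from_pixels. A minimal sketch of that pattern follows; it assumes stb_image for decoding and the clip_image_u8_init()/clip_image_u8_free() helpers from the same header, and is not code from this commit.

// Hedged sketch: decode an image file yourself, then build a clip_image_u8 from
// raw RGB pixels via clip_build_img_from_pixels (still declared in clip.h).
// Assumes stb_image is available; adjust includes/paths to your build.
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include "clip.h"

static clip_image_u8 * load_rgb_image(const char * fname) {
    int nx = 0, ny = 0, nc = 0;
    unsigned char * rgb = stbi_load(fname, &nx, &ny, &nc, 3); // force 3 channels (RGB)
    if (rgb == nullptr) {
        return nullptr;
    }
    clip_image_u8 * img = clip_image_u8_init();   // assumed helper from clip.h
    clip_build_img_from_pixels(rgb, nx, ny, img); // copies the pixels into the clip image
    stbi_image_free(rgb);
    return img; // caller releases it with clip_image_u8_free()
}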

tools/mtmd/tests.sh

Lines changed: 1 addition & 0 deletions
@@ -68,6 +68,7 @@ add_test_vision "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
 add_test_vision "ggml-org/InternVL2_5-1B-GGUF:Q8_0"
 add_test_vision "ggml-org/InternVL3-1B-Instruct-GGUF:Q8_0"
 add_test_vision "ggml-org/Qwen2.5-Omni-3B-GGUF:Q4_K_M"
+add_test_vision "ggml-org/LFM2-VL-450M-GGUF:Q8_0"
 
 add_test_audio "ggml-org/ultravox-v0_5-llama-3_2-1b-GGUF:Q8_0"
 add_test_audio "ggml-org/Qwen2.5-Omni-3B-GGUF:Q4_K_M"

0 commit comments
