1 parent ccca0fb · commit 7360d43
examples/llava/mtmd.cpp
@@ -58,6 +58,8 @@ struct mtmd_context {
     }
     this->text_model = text_model;

+    GGML_ASSERT(!clip_is_qwen2vl(ctx_clip) && "Qwen2VL model is not supported yet, use llama-qwen2vl-cli instead");
+
     int minicpmv_version = clip_is_minicpmv(ctx_clip);
     if (minicpmv_version == 2) {
         // minicpmv 2.5 format:
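The assertion above turns an unsupported Qwen2VL mmproj into a hard abort inside the mtmd_context constructor, pointing users at llama-qwen2vl-cli instead. As an illustration only (not part of this commit), an application embedding mtmd could run an equivalent pre-flight check on an already loaded clip context and fail recoverably; the helper name check_mmproj_supported is hypothetical, while clip_is_qwen2vl is the same predicate the assertion uses.

// Hypothetical pre-flight check, assuming a clip_ctx loaded from the mmproj file.
// It reports the unsupported Qwen2VL case as a recoverable error instead of
// letting GGML_ASSERT abort the process.
#include "clip.h"
#include <stdexcept>

static void check_mmproj_supported(struct clip_ctx * ctx_clip) {
    if (clip_is_qwen2vl(ctx_clip)) {
        throw std::runtime_error(
            "Qwen2VL mmproj is not supported by mtmd yet; use llama-qwen2vl-cli instead");
    }
}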
@@ -504,6 +506,7 @@ int32_t mtmd_helper_eval(mtmd_context * ctx,

     if (mtmd_decode_use_non_causal(ctx)) {
         llama_set_causal_attn(lctx, false);
+        // TODO @ngxson : need to make sure only one image is processed at a time, and n_ubatch must be enough to hold the image
     }

     while (i_batch < n_img_batches) { // split into batches
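The TODO in the second hunk records two constraints on the non-causal path: only one image should be processed per decode pass, and the image's embedding must fit into a single ubatch. The following is a sketch only, not code from this commit, of how such a guard might look; the function name and the n_img_tokens parameter are assumptions, while mtmd_decode_use_non_causal, llama_set_causal_attn, and llama_n_ubatch are existing APIs used in or alongside this file.

// Sketch: decode one image chunk's embeddings non-causally while enforcing the
// constraint named in the TODO. n_img_tokens stands for the number of embedding
// tokens produced for the image.
#include "llama.h"
#include "mtmd.h"
#include "ggml.h"

static void decode_image_non_causal_sketch(mtmd_context * ctx,
                                           llama_context * lctx,
                                           int32_t n_img_tokens) {
    if (mtmd_decode_use_non_causal(ctx)) {
        llama_set_causal_attn(lctx, false);
        // The whole image must fit into one ubatch so that its tokens can attend
        // to each other within a single non-causal forward pass.
        GGML_ASSERT((uint32_t) n_img_tokens <= llama_n_ubatch(lctx)
                    && "n_ubatch is too small to hold the whole image");
    }

    // ... decode the image embedding batches here (omitted) ...

    if (mtmd_decode_use_non_causal(ctx)) {
        llama_set_causal_attn(lctx, true); // restore causal attention for text tokens
    }
}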