
Commit 185e1b1

Add a note about GPU support to the qwen2vl README
Undo changes to qwen2vl-cli

1 parent: 8777473

2 files changed, +4 -12 lines

examples/llava/README-qwen2vl.md

Lines changed: 3 additions & 2 deletions
@@ -57,6 +57,7 @@ Now the model is ready to use in the `model_path` directory. You can quantize th
 
 *Have fun with the models ! :)*
 
-## Limitations
+## Current limitations
 
-* Currently, only support the image to be in the very beginning of the input prompt to the LLM.
+* This only supports the image to be in the very beginning of the input prompt to the LLM.
+* The vision model (clip.cpp)'s GPU backend support, which Qwen2VL uses, is disabled.

examples/llava/qwen2vl-cli.cpp

Lines changed: 1 addition & 10 deletions
@@ -524,7 +524,7 @@ int main(int argc, char ** argv) {
 
     common_init();
 
-    if (params.mmproj.empty()) {
+    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
         print_usage(argc, argv);
         return 1;
     }
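
The tightened guard relies on a `prompt_contains_image` helper to decide whether the prompt itself already carries an embedded image. The helper's body is not part of this diff; in the sibling llava-cli example the check looks for an inlined base64 `<img>` tag, roughly as sketched below (illustrative, not a verbatim quote of qwen2vl-cli.cpp):

    #include <string>

    // Sketch of the helper the new condition assumes: the llava examples
    // allow an image to be inlined into the prompt as a base64 data URI
    // inside an <img> tag, so the guard only rejects the run when neither
    // --image nor such a tag supplies an image.
    static bool prompt_contains_image(const std::string & prompt) {
        static const char * IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
        return prompt.find(IMG_BASE64_TAG_BEGIN) != std::string::npos;
    }

With this check folded into the initial argument validation, a run with no image anywhere fails fast with the usage message instead of reaching the prompt-processing paths below.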
@@ -547,15 +547,6 @@ int main(int argc, char ** argv) {
         llava_image_embed_free(image_embed);
         ctx_llava->model = NULL;
         llava_free(ctx_llava);
-    } else if (params.image.empty()) {
-        auto ctx_llava = llava_init_context(&params, model);
-
-        // process the prompt
-        process_prompt(ctx_llava, nullptr, &params, params.prompt);
-
-        llama_perf_context_print(ctx_llava->ctx_llama);
-        ctx_llava->model = NULL;
-        llava_free(ctx_llava);
 #ifndef NDEBUG
     } else if (params.image[0].empty()) {
         auto ctx_llava = llava_init_context(&params, model);
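
Net effect of the removal: the text-only branch (`params.image.empty()` with no image tag in the prompt) is gone, and the strengthened guard at the top of main() now rejects that case up front. Reconstructed from the surrounding context lines, the surviving image path is roughly the following (`load_image` is the helper name used by the llava examples; treat this as a sketch, not the exact file contents):

    // Every path that survives the guard has an image to embed, either
    // from --image or inlined in the prompt, so main() always builds an
    // image embed before processing the prompt.
    for (auto & image : params.image) {
        auto ctx_llava   = llava_init_context(&params, model);
        auto image_embed = load_image(ctx_llava, &params, image);  // assumed helper, as in llava-cli

        process_prompt(ctx_llava, image_embed, &params, params.prompt);

        llama_perf_context_print(ctx_llava->ctx_llama);
        llava_image_embed_free(image_embed);
        ctx_llava->model = NULL;
        llava_free(ctx_llava);
    }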
