@@ -300,14 +300,10 @@ int main(int argc, char ** argv) {
300300 return 1 ;
301301 }
302302
303- for ( auto & image : params.image ) {
303+ if ( prompt_contains_image ( params.prompt ) ) {
304304 auto ctx_llava = llava_init_context (&params, model);
305305
306- auto image_embed = load_image (ctx_llava, &params, image);
307- if (!image_embed) {
308- std::cerr << " error: failed to load image " << image << " . Terminating\n\n " ;
309- return 1 ;
310- }
306+ auto image_embed = load_image (ctx_llava, &params, " " );
311307
312308 // process the prompt
313309 process_prompt (ctx_llava, image_embed, &params, params.prompt );
@@ -316,7 +312,26 @@ int main(int argc, char ** argv) {
316312 llava_image_embed_free (image_embed);
317313 ctx_llava->model = NULL ;
318314 llava_free (ctx_llava);
315+ } else {
316+ for (auto & image : params.image ) {
317+ auto ctx_llava = llava_init_context (&params, model);
318+
319+ auto image_embed = load_image (ctx_llava, &params, image);
320+ if (!image_embed) {
321+ std::cerr << " error: failed to load image " << image << " . Terminating\n\n " ;
322+ return 1 ;
323+ }
324+
325+ // process the prompt
326+ process_prompt (ctx_llava, image_embed, &params, params.prompt );
327+
328+ llama_print_timings (ctx_llava->ctx_llama );
329+ llava_image_embed_free (image_embed);
330+ ctx_llava->model = NULL ;
331+ llava_free (ctx_llava);
332+ }
319333 }
334+
320335 llama_free_model (model);
321336
322337 return 0 ;
0 commit comments