2626
2727// volatile, because of signal being an interrupt
2828static volatile bool g_is_generating = false ;
29- static volatile bool is_app_running = true ;
29+ static volatile bool g_is_interrupted = false ;
3030
3131/* *
3232 * Please note that this is NOT production-ready stuff.
@@ -52,10 +52,10 @@ static void sigint_handler(int signo) {
5252 g_is_generating = false ;
5353 } else {
5454 console::cleanup ();
55- if (!is_app_running ) {
55+ if (g_is_interrupted ) {
5656 _exit (1 );
5757 }
58- is_app_running = false ;
58+ g_is_interrupted = true ;
5959 }
6060 }
6161}
@@ -171,7 +171,7 @@ struct decode_embd_batch {
171171static int generate_response (mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) {
172172 llama_tokens generated_tokens;
173173 for (int i = 0 ; i < n_predict; i++) {
174- if (i > n_predict || !g_is_generating || !is_app_running ) {
174+ if (i > n_predict || !g_is_generating || g_is_interrupted ) {
175175 printf (" \n " );
176176 break ;
177177 }
@@ -188,7 +188,7 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int
188188 printf (" %s" , common_token_to_piece (ctx.lctx , token_id).c_str ());
189189 fflush (stdout);
190190
191- if (!is_app_running ) {
191+ if (g_is_interrupted ) {
192192 printf (" \n " );
193193 break ;
194194 }
@@ -215,7 +215,7 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vect
215215 LOG_DBG (" formatted_chat.prompt: %s\n " , formatted_chat.prompt .c_str ());
216216
217217 for (auto & fname : images_fname) {
218- if (!is_app_running ) return 0 ;
218+ if (g_is_interrupted ) return 0 ;
219219 mtmd_bitmap bitmap;
220220 if (mtmd_helper_bitmap_init_from_file (fname.c_str (), bitmap)) {
221221 LOG_ERR (" Unable to load image %s\n " , fname.c_str ());
@@ -230,7 +230,7 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vect
230230 text.parse_special = true ;
231231 mtmd_input_chunks chunks;
232232
233- if (!is_app_running ) return 0 ;
233+ if (g_is_interrupted ) return 0 ;
234234
235235 int32_t res = mtmd_tokenize (ctx.ctx_vision .get (), chunks, text, bitmaps);
236236 if (res != 0 ) {
@@ -289,7 +289,7 @@ int main(int argc, char ** argv) {
289289#endif
290290 }
291291
292- if (!is_app_running ) return 130 ;
292+ if (g_is_interrupted ) return 130 ;
293293
294294 if (is_single_turn) {
295295 g_is_generating = true ;
@@ -302,7 +302,7 @@ int main(int argc, char ** argv) {
302302 if (eval_message (ctx, msg, params.image , true )) {
303303 return 1 ;
304304 }
305- if (is_app_running && generate_response (ctx, smpl, n_predict)) {
305+ if (!g_is_interrupted && generate_response (ctx, smpl, n_predict)) {
306306 return 1 ;
307307 }
308308
@@ -317,13 +317,13 @@ int main(int argc, char ** argv) {
317317 std::vector<std::string> images_fname;
318318 std::string content;
319319
320- while (is_app_running ) {
320+ while (!g_is_interrupted ) {
321321 g_is_generating = false ;
322322 LOG (" \n > " );
323323 console::set_display (console::user_input);
324324 std::string line;
325325 console::readline (line, false );
326- if (!is_app_running ) break ;
326+ if (g_is_interrupted ) break ;
327327 console::set_display (console::reset);
328328 line = string_strip (line);
329329 if (line.empty ()) {
@@ -351,7 +351,7 @@ int main(int argc, char ** argv) {
351351 msg.role = " user" ;
352352 msg.content = content;
353353 int ret = eval_message (ctx, msg, images_fname, is_first_msg);
354- if (!is_app_running ) break ;
354+ if (g_is_interrupted ) break ;
355355 if (ret == 2 ) {
356356 // non-fatal error
357357 images_fname.clear ();
@@ -369,7 +369,7 @@ int main(int argc, char ** argv) {
369369 is_first_msg = false ;
370370 }
371371 }
372- if (!is_app_running ) LOG (" \n Interrupted by user\n " );
372+ if (g_is_interrupted ) LOG (" \n Interrupted by user\n " );
373373 llama_perf_context_print (ctx.lctx );
374- return is_app_running ? 0 : 130 ;
374+ return g_is_interrupted ? 130 : 0 ;
375375}
0 commit comments