@@ -151,7 +151,7 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int
     llama_tokens generated_tokens;
     for (int i = 0; i < n_predict; i++) {
         if (i > n_predict || !g_is_generating || g_is_interrupted) {
-            printf("\n");
+            LOG("\n");
             break;
         }
 
@@ -160,15 +160,15 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int
         common_sampler_accept(smpl, token_id, true);
 
         if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
-            printf("\n");
+            LOG("\n");
             break; // end of generation
         }
 
-        printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
+        LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
         fflush(stdout);
 
         if (g_is_interrupted) {
-            printf("\n");
+            LOG("\n");
             break;
         }
 
@@ -214,7 +214,7 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_
 
     ctx.n_past += mtmd_helper_get_n_pos(chunks);
 
-    printf("\n");
+    LOG("\n");
 
     return 0;
 }
@@ -238,7 +238,7 @@ int main(int argc, char ** argv) {
     }
 
     mtmd_cli_context ctx(params);
-    printf("%s: %s\n", __func__, params.model.path.c_str());
+    LOG("%s: loading model: %s\n", __func__, params.model.path.c_str());
 
     bool is_single_turn = !params.prompt.empty() && !params.image.empty();
 
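For context, the `LOG` call introduced on the `+` side is llama.cpp's printf-style logging macro (from `common/log.h`), so these messages presumably flow through the common logging facility rather than hitting `stdout` via bare `printf`. Below is a minimal sketch of a macro in that spirit; `log_printf` and its body are assumptions for illustration only, not the actual definition in `common/log.h`.

```cpp
// Illustrative sketch only: a printf-style LOG macro similar in shape to the
// one used in the diff. The real llama.cpp macro differs; log_printf here is
// a hypothetical helper.
#include <cstdarg>
#include <cstdio>

static void log_printf(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vfprintf(stdout, fmt, args); // a real sink could add log levels, timestamps, or a file target
    va_end(args);
}

#define LOG(...) log_printf(__VA_ARGS__)

int main() {
    // Same call shape as the patched lines: format string plus arguments.
    LOG("%s: loading model: %s\n", __func__, "models/example.gguf");
    return 0;
}
```

Routing output through a single macro like this is what lets call sites such as `generate_response()` and `eval_message()` stay unchanged if the logging destination or formatting changes later.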