Skip to content

Commit d6d15ca

Browse files
committed
Move mtmd debug logs to stderr
Without these changes, the stdout of llama-mtmd-cli gets polluted with debug messages.
1 parent 10d2af0 commit d6d15ca

File tree

2 files changed

+9
-8
lines changed

2 files changed

+9
-8
lines changed

tools/mtmd/mtmd-cli.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -232,7 +232,7 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_
232232

233233
ctx.n_past = new_n_past;
234234

235-
LOG("\n");
235+
LOG_DBG("\n");
236236

237237
return 0;
238238
}
@@ -256,7 +256,7 @@ int main(int argc, char ** argv) {
256256
}
257257

258258
mtmd_cli_context ctx(params);
259-
LOG("%s: loading model: %s\n", __func__, params.model.path.c_str());
259+
LOG_DBG("%s: loading model: %s\n", __func__, params.model.path.c_str());
260260

261261
bool is_single_turn = !params.prompt.empty() && !params.image.empty();
262262

@@ -364,7 +364,7 @@ int main(int argc, char ** argv) {
364364
}
365365
}
366366
if (g_is_interrupted) LOG("\nInterrupted by user\n");
367-
LOG("\n\n");
367+
LOG_DBG("\n\n");
368368
llama_perf_context_print(ctx.lctx);
369369
return g_is_interrupted ? 130 : 0;
370370
}

tools/mtmd/mtmd-helper.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <cinttypes>
66
#include <vector>
77

8+
#define LOG_DBG(...) fprintf(stderr, __VA_ARGS__)
89
#define LOG_INF(...) fprintf(stdout, __VA_ARGS__)
910
#define LOG_ERR(...) fprintf(stderr, __VA_ARGS__)
1011

@@ -187,7 +188,7 @@ int32_t mtmd_helper_decode_image_chunk(
187188
int n_tokens_batch = std::min(n_batch, n_tokens - pos_offset);
188189
llama_batch batch_embd_view = batch_embd.get_view(pos_offset, n_tokens_batch);
189190

190-
LOG_INF("decoding image batch %d/%d, n_tokens_batch = %d\n", i_batch+1, n_img_batches, n_tokens_batch);
191+
LOG_DBG("decoding image batch %d/%d, n_tokens_batch = %d\n", i_batch+1, n_img_batches, n_tokens_batch);
191192

192193
int64_t t1 = ggml_time_ms();
193194
int32_t ret = llama_decode(lctx, batch_embd_view);
@@ -197,7 +198,7 @@ int32_t mtmd_helper_decode_image_chunk(
197198
return ret;
198199
}
199200

200-
LOG_INF("image decoded (batch %d/%d) in %" PRId64 " ms\n", i_batch+1, n_img_batches, ggml_time_ms() - t1);
201+
LOG_DBG("image decoded (batch %d/%d) in %" PRId64 " ms\n", i_batch+1, n_img_batches, ggml_time_ms() - t1);
201202

202203
i_batch++;
203204
}
@@ -226,7 +227,7 @@ int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
226227
if (chunk_type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
227228
size_t n_tokens;
228229
const auto tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
229-
// LOG_INF("decoding text chunk, n_tokens = %zu\n", n_tokens);
230+
// LOG_DBG("decoding text chunk, n_tokens = %zu\n", n_tokens);
230231
size_t i = 0;
231232
while (i < n_tokens) { // split into batches
232233
text_batch.n_tokens = 0; // clear the batch
@@ -255,7 +256,7 @@ int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
255256
const auto image_tokens = mtmd_input_chunk_get_tokens_image(chunk);
256257
int64_t t0 = ggml_time_ms();
257258

258-
LOG_INF("encoding image or slice...\n");
259+
LOG_DBG("encoding image or slice...\n");
259260

260261
ret = mtmd_encode(ctx, image_tokens);
261262
if (ret != 0) {
@@ -264,7 +265,7 @@ int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
264265
return ret;
265266
}
266267

267-
LOG_INF("image/slice encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);
268+
LOG_DBG("image/slice encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);
268269

269270
float * embd = mtmd_get_output_embd(ctx);
270271
ret = mtmd_helper_decode_image_chunk(ctx, lctx, chunk, embd, n_past, seq_id, n_batch, new_n_past);

0 commit comments

Comments (0)