Skip to content

Commit ccc90ab

Browse files
committed
Fix logging from llama-llava-cli
Use the actual logging facilities from common, instead of bespoke LOG_xxx() macros that unconditionally print to stdout/stderr. The actual response is printed with printf(), so that --log-disable suppresses log output without also swallowing the model's response.
1 parent 2f8bd2b commit ccc90ab

File tree

3 files changed

+6
-14
lines changed

3 files changed

+6
-14
lines changed

examples/llava/clip.cpp

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include "ggml.h"
77
#include "ggml-alloc.h"
88
#include "ggml-backend.h"
9+
#include "log.h"
910

1011
#ifdef GGML_USE_CUDA
1112
#include "ggml-cuda.h"
@@ -39,11 +40,6 @@
3940
#include <cinttypes>
4041
#include <limits>
4142

42-
#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
43-
#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
44-
#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
45-
#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
46-
4743
//#define CLIP_DEBUG_FUNCTIONS
4844

4945
// RGB uint8 image

examples/llava/llava-cli.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
203203
response += tmp;
204204
if (strcmp(tmp, "</s>") == 0) break;
205205
if (strstr(tmp, "###")) break; // Yi-VL behavior
206-
LOG("%s", tmp);
206+
printf("%s", tmp);
207207
if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
208208
if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
209209
if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
@@ -212,7 +212,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
212212
}
213213

214214
common_sampler_free(smpl);
215-
LOG("\n");
215+
printf("\n");
216216
}
217217

218218
static struct llama_model * llava_init(common_params * params) {

examples/llava/llava.cpp

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#include "llava.h"
33

44
#include "llama.h"
5+
#include "log.h"
56

67
#include <algorithm>
78
#include <cerrno>
@@ -11,13 +12,8 @@
1112
#include <limits>
1213
#include <vector>
1314

14-
#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
15-
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
16-
17-
#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
18-
#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
19-
#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
20-
#define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
15+
#define die(msg) do { LOG_ERR("%s", "error: " msg "\n"); exit(1); } while (0)
16+
#define die_fmt(fmt, ...) do { LOG_ERR("error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
2117

2218
// RGB uint8 image
2319
struct clip_image_u8 {

0 commit comments

Comments
 (0)