diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index 560d28613..b88344e18 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -56,6 +56,25 @@
 #define __STATIC_INLINE__ static inline
 #endif
 
+__STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const char* text, void*) {
+    switch (level) {
+        case GGML_LOG_LEVEL_DEBUG:
+            LOG_DEBUG("%s", text);
+            break;
+        case GGML_LOG_LEVEL_INFO:
+            LOG_INFO("%s", text);
+            break;
+        case GGML_LOG_LEVEL_WARN:
+            LOG_WARN("%s", text);
+            break;
+        case GGML_LOG_LEVEL_ERROR:
+            LOG_ERROR("%s", text);
+            break;
+        default:
+            LOG_DEBUG("%s", text);
+    }
+}
+
 static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128");
 
 // n-mode trensor-matrix product
@@ -124,13 +143,6 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_kronecker(ggml_context* ctx, struct g
                        b);
 }
 
-__STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const char* text, void* user_data) {
-    (void)level;
-    (void)user_data;
-    fputs(text, stderr);
-    fflush(stderr);
-}
-
 __STATIC_INLINE__ void ggml_tensor_set_f32_randn(struct ggml_tensor* tensor, std::shared_ptr rng) {
     uint32_t n = (uint32_t)ggml_nelements(tensor);
     std::vector random_numbers = rng->randn(n);
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 088947311..9c30fa2d4 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -145,7 +145,6 @@ class StableDiffusionGGML {
 #endif
 #ifdef SD_USE_METAL
         LOG_DEBUG("Using Metal backend");
-        ggml_log_set(ggml_log_callback_default, nullptr);
         backend = ggml_backend_metal_init();
 #endif
 #ifdef SD_USE_VULKAN
@@ -192,6 +191,8 @@ class StableDiffusionGGML {
             rng = std::make_shared();
         }
 
+        ggml_log_set(ggml_log_callback_default, nullptr);
+
         init_backend();
 
         ModelLoader model_loader;
diff --git a/upscaler.cpp b/upscaler.cpp
index 4ab0b73c2..2bd62c09e 100644
--- a/upscaler.cpp
+++ b/upscaler.cpp
@@ -19,13 +19,13 @@ struct UpscalerGGML {
 
     bool load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu) {
+        ggml_log_set(ggml_log_callback_default, nullptr);
 #ifdef SD_USE_CUDA
         LOG_DEBUG("Using CUDA backend");
         backend = ggml_backend_cuda_init(0);
 #endif
 #ifdef SD_USE_METAL
         LOG_DEBUG("Using Metal backend");
-        ggml_log_set(ggml_log_callback_default, nullptr);
         backend = ggml_backend_metal_init();
 #endif
 #ifdef SD_USE_VULKAN
diff --git a/util.cpp b/util.cpp
index c2468ac54..b9142e606 100644
--- a/util.cpp
+++ b/util.cpp
@@ -414,7 +414,10 @@ void log_printf(sd_log_level_t level, const char* file, int line, const char* fo
     if (written >= 0 && written < LOG_BUFFER_SIZE) {
         vsnprintf(log_buffer + written, LOG_BUFFER_SIZE - written, format, args);
     }
-    strncat(log_buffer, "\n", LOG_BUFFER_SIZE - strlen(log_buffer));
+    size_t len = strlen(log_buffer);
+    if (len == 0 || log_buffer[len - 1] != '\n') {
+        strncat(log_buffer, "\n", LOG_BUFFER_SIZE - len);
+    }
 
     if (sd_log_cb) {
         sd_log_cb(level, log_buffer, sd_log_cb_data);