From 04fb13d3c798ee945aa82ecf9d5d89204c806436 Mon Sep 17 00:00:00 2001
From: theraininsky
Date: Wed, 12 Feb 2025 23:54:36 +0800
Subject: [PATCH 1/2] llama-bench : fix unexpected global variable initialize
 sequence issue

---
 examples/llama-bench/llama-bench.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 4ac19ca86ec56..ed320e171f286 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -876,8 +876,8 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
 struct test {
     static const std::string build_commit;
     static const int build_number;
-    static const std::string cpu_info;
-    static const std::string gpu_info;
+    const std::string cpu_info;
+    const std::string gpu_info;
     std::string model_filename;
     std::string model_type;
     uint64_t model_size;
@@ -903,7 +903,9 @@ struct test {
     std::string test_time;
     std::vector<uint64_t> samples_ns;
 
-    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) {
+    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx)
+        :cpu_info(get_cpu_info()),
+        gpu_info(get_gpu_info()) {
         model_filename = inst.model;
         char buf[128];
         llama_model_desc(lmodel, buf, sizeof(buf));
@@ -1058,8 +1060,6 @@ struct test {
 
 const std::string test::build_commit = LLAMA_COMMIT;
 const int test::build_number = LLAMA_BUILD_NUMBER;
-const std::string test::cpu_info = get_cpu_info();
-const std::string test::gpu_info = get_gpu_info();
 
 struct printer {
     virtual ~printer() {}

From ae723073d02baffce5b1ba54294ad2e352a982ee Mon Sep 17 00:00:00 2001
From: Diego Devesa
Date: Fri, 14 Feb 2025 02:13:02 +0100
Subject: [PATCH 2/2] Update examples/llama-bench/llama-bench.cpp

---
 examples/llama-bench/llama-bench.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index ed320e171f286..f518d02d38689 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -903,9 +903,10 @@ struct test {
     std::string test_time;
     std::vector<uint64_t> samples_ns;
 
-    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx)
-        :cpu_info(get_cpu_info()),
+    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) :
+        cpu_info(get_cpu_info()),
         gpu_info(get_gpu_info()) {
+
         model_filename = inst.model;
         char buf[128];
         llama_model_desc(lmodel, buf, sizeof(buf));
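
For context on why the first commit moves cpu_info/gpu_info out of static storage: dynamic initializers of namespace-scope statics in different translation units run in an unspecified order, so `const std::string test::cpu_info = get_cpu_info();` could execute before the globals that get_cpu_info()/get_gpu_info() depend on were constructed. Below is a minimal standalone sketch of that failure mode and of the fix (constructor initializer list); the Registry global and the get_cpu_info() stub are hypothetical stand-ins, not the real llama.cpp implementation.

    // static_init_order.cpp -- standalone illustration; Registry and the
    // get_cpu_info() stub are hypothetical, not the actual llama.cpp code.
    #include <string>
    #include <vector>

    // Pretend this registry lives in another translation unit. C++ gives no
    // ordering guarantee between dynamic initializers in different TUs, so a
    // static in another file may be initialized before g_registry's constructor runs.
    struct Registry {
        std::vector<std::string> devices;
        Registry() { devices.push_back("CPU"); }
    };
    Registry g_registry;

    static std::string get_cpu_info() {
        // If called during static initialization, g_registry may not be
        // constructed yet and this silently returns an empty string.
        return g_registry.devices.empty() ? "" : g_registry.devices.front();
    }

    struct test {
        // Before the patch: a static member whose dynamic initializer calls
        // get_cpu_info(), possibly before g_registry is ready:
        //   static const std::string cpu_info;
        //   const std::string test::cpu_info = get_cpu_info();

        // After the patch: a plain member filled in the constructor's
        // initializer list, so get_cpu_info() only runs when a test object is
        // created at runtime.
        const std::string cpu_info;
        test() : cpu_info(get_cpu_info()) {}
    };

    int main() {
        test t;                     // constructed long after all globals exist
        return t.cpu_info.empty();  // 0: the registry is fully initialized here
    }

Compiled as a single file the order happens to be well defined, but the comments mark where the real code crosses translation units; the initializer-list version sidesteps the question entirely because it only runs after main() has started.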