Commit 7bc60eb

llama-context: fix build failure with -Werror=missing-braces
ref: GH-16614

## Why it failed

When compiling with strict compiler flags (`-Wmissing-braces -Werror=missing-braces`), the build fails with the following error:

```
cmake \
  -S . \
  -B ../llama.cpp.build \
  --preset=x64-linux-gcc-debug \
  -DCMAKE_INSTALL_PREFIX=/tmp/local \
  -DCMAKE_CXX_FLAGS="-Wmissing-braces -Werror=missing-braces" && \
cmake --build ../llama.cpp.build/
...
/home/otegami/work/cpp/llama.cpp/src/llama-context.cpp: In function ‘void llama_memory_breakdown_print(const llama_context*)’:
/home/otegami/work/cpp/llama.cpp/src/llama-context.cpp:2801:25: error: missing braces around initializer for ‘std::__array_traits<std::__cxx11::basic_string<char>, 9>::_Type’ {aka ‘std::__cxx11::basic_string<char> [9]’} [-Werror=missing-braces]
...
```

The issue is that `std::array` initialization requires double braces.

## How to fix

This PR changes `{ 0 }` to `{{ 0 }}` for `std::array` initialization.

This is part of a series of commits to fix missing-braces warnings across the codebase:

- Done: ~~src/llama-batch.h~~
- src/llama-context.cpp <- This PR is here.
- tests/test-backend-ops.cpp
- tests/test-gguf.cpp
- tools/mtmd/clip.cpp

Benefits:

- `std::array` is a struct containing a C-style array, requiring nested braces
- Enables stricter compiler warnings to catch potential issues
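As an editorial aside (not part of the commit), here is a minimal, self-contained sketch of why GCC emits this warning and why double braces fix it; the `row` and `table` names and the element count of 3 are hypothetical:

```cpp
// Build: g++ -std=c++17 -Wmissing-braces -Werror=missing-braces repro.cpp
#include <array>
#include <string>
#include <vector>

int main() {
    // std::array is an aggregate whose only member is a C-style array, so a
    // single brace layer leaves that inner array without its own braces:
    //   std::array<std::string, 3> bad = {"a", "b", "c"}; // -Wmissing-braces fires

    // Double braces: outer braces for the std::array, inner for the array member.
    std::array<std::string, 3> row = {{"a", "b", "c"}};

    // The same rule applies to a braced temporary passed to push_back, which is
    // the pattern this commit fixes in llama_memory_breakdown_print:
    std::vector<std::array<std::string, 3>> table;
    table.push_back({{row[0], row[1], row[2]}});

    return 0;
}
```

With `-Wmissing-braces` alone GCC only warns here; `-Werror=missing-braces` promotes the warning to an error, which is why the build fails.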
1 parent 6de8ed7 commit 7bc60eb

File tree

1 file changed (+7, −7)


src/llama-context.cpp

Lines changed: 7 additions & 7 deletions
```diff
@@ -2798,7 +2798,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
     const std::string template_gpu = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n";
     const std::string template_other = "%s: | %s | %s %s %s = %s + %s + %s %s |\n";
 
-    table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"});
+    table_data.push_back({{template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"}});
 
     constexpr size_t MiB = 1024 * 1024;
     const std::vector<std::string> desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "};
@@ -2858,7 +2858,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
         const size_t self = mb.model + mb.context + mb.compute;
         const size_t unaccounted = total - self - free;
 
-        table_data.push_back({
+        table_data.push_back({{
             template_gpu,
             " - " + name + " (" + desc + ")",
             std::to_string(total / MiB),
@@ -2867,13 +2867,13 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb.model / MiB),
             std::to_string(mb.context / MiB),
             std::to_string(mb.compute / MiB),
-            std::to_string(unaccounted / MiB)});
+            std::to_string(unaccounted / MiB)}});
     }
 
     // print memory breakdown for host:
     {
         const size_t self = mb_host.model + mb_host.context + mb_host.compute;
-        table_data.push_back({
+        table_data.push_back({{
             template_other,
             " - Host",
             "", // total
@@ -2882,7 +2882,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb_host.model / MiB),
             std::to_string(mb_host.context / MiB),
             std::to_string(mb_host.compute / MiB),
-            ""}); // unaccounted
+            ""}}); // unaccounted
     }
 
     // print memory breakdown for all remaining buffer types:
@@ -2894,7 +2894,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
         }
         const std::string name = ggml_backend_buft_name(buft);
         const size_t self = mb.model + mb.context + mb.compute;
-        table_data.push_back({
+        table_data.push_back({{
             template_other,
             " - " + name,
             "", // total
@@ -2903,7 +2903,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb.model / MiB),
             std::to_string(mb.context / MiB),
             std::to_string(mb.compute / MiB),
-            ""}); // unaccounted
+            ""}}); // unaccounted
         seen_buffer_types.insert(buft);
     }
 
```