Skip to content

Commit 8622380

Browse files
committed
[ggml/llama] fix warnings-as-errors in CI build and temporarily force the C23 standard
1 parent a314634 commit 8622380

File tree

7 files changed

+14
-12
lines changed

7 files changed

+14
-12
lines changed

Makefile

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -257,9 +257,9 @@ endif
257257
# Compile flags
258258
#
259259

260-
# keep standard at C11 and C++17
260+
# keep standard at C23 and C++17
261261
MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -DGGML_USE_CPU
262-
MK_CFLAGS = -std=c11 -fPIC
262+
MK_CFLAGS = -std=c23 -fPIC
263263
MK_CXXFLAGS = -std=c++17 -fPIC
264264
MK_NVCCFLAGS = -std=c++17
265265

@@ -393,8 +393,8 @@ MK_CXXFLAGS += \
393393
-Wmissing-noreturn
394394

395395
ifeq ($(LLAMA_FATAL_WARNINGS),1)
396-
MK_CFLAGS += -Werror
397-
MK_CXXFLAGS += -Werror
396+
MK_CFLAGS += -Werror -pedantic-errors -Wno-pedantic
397+
MK_CXXFLAGS += -Werror -pedantic-errors -Wno-pedantic
398398
endif
399399

400400
# this version of Apple ld64 is buggy

ggml/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -216,7 +216,7 @@ option(GGML_BUILD_EXAMPLES "ggml: build examples" ${GGML_STANDALONE})
216216
# dependencies
217217
#
218218

219-
set(CMAKE_C_STANDARD 11)
219+
set(CMAKE_C_STANDARD 23)
220220
set(CMAKE_C_STANDARD_REQUIRED true)
221221

222222
set(CMAKE_CXX_STANDARD 17)

ggml/src/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -28,7 +28,9 @@ endif()
2828
if (GGML_FATAL_WARNINGS)
2929
if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
3030
list(APPEND C_FLAGS -Werror)
31+
list(APPEND C_FLAGS -pedantic-errors -Wno-pedantic)
3132
list(APPEND CXX_FLAGS -Werror)
33+
list(APPEND CXX_FLAGS -pedantic-errors -Wno-pedantic)
3234
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
3335
add_compile_options(/WX)
3436
endif()

ggml/src/ggml-quants.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4996,7 +4996,7 @@ static bool validate_fp16(ggml_fp16_t f, size_t i) {
49964996
}
49974997

49984998
bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
4999-
if (type < 0 || type >= GGML_TYPE_COUNT) {
4999+
if (type >= GGML_TYPE_COUNT) {
50005000
fprintf(stderr, "%s: invalid type %d\n", __func__, type);
50015001
return false;
50025002
}

ggml/src/ggml.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1565,7 +1565,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
15651565
struct ggml_tensor * view_src,
15661566
size_t view_offs) {
15671567

1568-
GGML_ASSERT(type >= 0 && type < GGML_TYPE_COUNT);
1568+
GGML_ASSERT(type > 0 && type < GGML_TYPE_COUNT);
15691569
GGML_ASSERT(n_dims >= 1 && n_dims <= GGML_MAX_DIMS);
15701570

15711571
// find the base tensor and absolute offset

ggml/src/gguf.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -546,7 +546,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
546546
ok = ok && gr.read(info.t.type);
547547

548548
// check that tensor type is within defined range
549-
if (info.t.type < 0 || info.t.type >= GGML_TYPE_COUNT) {
549+
if (info.t.type >= GGML_TYPE_COUNT) {
550550
fprintf(stderr, "%s: tensor '%s' has invalid ggml type %d (%s)\n",
551551
__func__, info.t.name, info.t.type, ggml_type_name(info.t.type));
552552
ok = false;

src/llama-vocab.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -826,9 +826,9 @@ struct llm_tokenizer_ugm_session {
826826
}
827827

828828
// initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores
829-
std::vector<struct best_tokenization> tokenization_results(input_len + 1, {vocab.token_unk(), 0, -FLT_MAX});
829+
std::vector<struct best_tokenization> tokenization_results(input_len + 1, {0, vocab.token_unk(), -FLT_MAX});
830830
// at the beginning tokenization score is zero
831-
tokenization_results[0] = { vocab.token_unk(), 0, 0 };
831+
tokenization_results[0] = { 0, vocab.token_unk(), 0 };
832832

833833
for (size_t input_offset = 0; input_offset < input_len;) {
834834
size_t prefix_offset = input_offset;
@@ -858,7 +858,7 @@ struct llm_tokenizer_ugm_session {
858858
const double challenger_score = current_best.score_sum + token_score;
859859
struct best_tokenization & current_champ = tokenization_results[prefix_offset];
860860
if (challenger_score > current_champ.score_sum) {
861-
struct best_tokenization challenger = { token_id, input_offset, (float) challenger_score };
861+
struct best_tokenization challenger = { input_offset, token_id, (float) challenger_score };
862862
current_champ = challenger;
863863
}
864864
}
@@ -872,7 +872,7 @@ struct llm_tokenizer_ugm_session {
872872
prefix_offset = input_offset + n_utf8_code_units;
873873
struct best_tokenization & current_champ = tokenization_results[prefix_offset];
874874
if (challenger_score > current_champ.score_sum) {
875-
struct best_tokenization challenger = { vocab.token_unk(), input_offset, (float) challenger_score };
875+
struct best_tokenization challenger = { input_offset, vocab.token_unk(), (float) challenger_score };
876876
current_champ = challenger;
877877
}
878878
}

0 commit comments

Comments (0)