
Commit 439e68c

cmake : re-enable GCC -Wshadow
ggml-ci
1 parent 34889bf commit 439e68c

File tree

16 files changed: +73 -65 lines changed


cmake/common.cmake

Lines changed: 5 additions & 2 deletions
@@ -15,9 +15,12 @@ function(llama_add_compile_flags)
 
             list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn)
 
-            # GCC -Wshadow is way too agressive
-            if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+            if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
                 list(APPEND CXX_FLAGS -Wshadow)
+
+                if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+                    list(APPEND CXX_FLAGS -Wshadow -Wshadow-field-in-constructor)
+                endif()
             endif()
 
             list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function)
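
As context for the renames in the rest of this commit: GCC's -Wshadow also fires when a constructor parameter has the same name as the data member it initializes, while Clang only reports that case with the extra -Wshadow-field-in-constructor flag enabled above. A minimal sketch of the warning being addressed (hypothetical widget struct, not taken from the repository):

#include <string>

struct widget {
    std::string name;

    // With -Wshadow, GCC reports that the parameter 'name' shadows the member
    // 'widget::name'; Clang needs -Wshadow-field-in-constructor to do the same.
    widget(const std::string & name) : name(name) {}
};

int main() {
    widget w("example");
    return static_cast<int>(w.name.size());
}

Compiling this sketch with g++ -Wshadow should reproduce the diagnostic; renaming the parameter, as the files below do, silences it without changing behavior.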

common/arg.h

Lines changed: 17 additions & 17 deletions
@@ -25,33 +25,33 @@ struct common_arg {
     void (*handler_int) (common_params & params, int) = nullptr;
 
     common_arg(
-        const std::initializer_list<const char *> & args,
-        const char * value_hint,
-        const std::string & help,
+        const std::initializer_list<const char *> & args_,
+        const char * value_hint_,
+        const std::string & help_,
         void (*handler)(common_params & params, const std::string &)
-    ) : args(args), value_hint(value_hint), help(help), handler_string(handler) {}
+    ) : args(args_), value_hint(value_hint_), help(help_), handler_string(handler) {}
 
     common_arg(
-        const std::initializer_list<const char *> & args,
-        const char * value_hint,
-        const std::string & help,
+        const std::initializer_list<const char *> & args_,
+        const char * value_hint_,
+        const std::string & help_,
         void (*handler)(common_params & params, int)
-    ) : args(args), value_hint(value_hint), help(help), handler_int(handler) {}
+    ) : args(args_), value_hint(value_hint_), help(help_), handler_int(handler) {}
 
     common_arg(
-        const std::initializer_list<const char *> & args,
-        const std::string & help,
+        const std::initializer_list<const char *> & args_,
+        const std::string & help_,
         void (*handler)(common_params & params)
-    ) : args(args), help(help), handler_void(handler) {}
+    ) : args(args_), help(help_), handler_void(handler) {}
 
     // support 2 values for arg
     common_arg(
-        const std::initializer_list<const char *> & args,
-        const char * value_hint,
-        const char * value_hint_2,
-        const std::string & help,
+        const std::initializer_list<const char *> & args_,
+        const char * value_hint_,
+        const char * value_hint_2_,
+        const std::string & help_,
         void (*handler)(common_params & params, const std::string &, const std::string &)
-    ) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
+    ) : args(args_), value_hint(value_hint_), value_hint_2(value_hint_2_), help(help_), handler_str_str(handler) {}
 
     common_arg & set_examples(std::initializer_list<enum llama_example> vals);
     common_arg & set_excludes(std::initializer_list<enum llama_example> vals);

@@ -69,7 +69,7 @@ struct common_params_context {
     common_params & params;
     std::vector<common_arg> options;
     void(*print_usage)(int, char **) = nullptr;
-    common_params_context(common_params & params) : params(params) {}
+    common_params_context(common_params & params_) : params(params_) {}
 };
 
 // parse input arguments from CLI
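
For readers skimming the diff: the trailing-underscore renames change only the constructor parameter names; the members, the initializer lists, and all call sites keep their existing meaning. A reduced sketch of the pattern (hypothetical option_desc struct, loosely modeled on common_arg but not copied from it):

#include <initializer_list>
#include <string>
#include <vector>

struct option_desc {
    std::vector<const char *> args;
    std::string               help;

    // The parameters carry a trailing underscore, so args(args_) unambiguously
    // initializes the member from the parameter and -Wshadow has nothing to flag.
    option_desc(const std::initializer_list<const char *> & args_, const std::string & help_)
        : args(args_), help(help_) {}
};

int main() {
    option_desc od({"-h", "--help"}, "print usage");
    return od.args.size() == 2 ? 0 : 1;
}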

examples/export-lora/export-lora.cpp

Lines changed: 4 additions & 4 deletions
@@ -66,7 +66,7 @@ struct file_input {
     float alpha;
     float scale;
 
-    file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
+    file_input(std::string & fname, float scale_): f_in(fname, std::ios::binary), scale(scale_) {
         if (!f_in.is_open()) {
             throw std::runtime_error("failed to open input gguf from " + fname);
         }

@@ -131,7 +131,7 @@ struct lora_merge_ctx {
             std::string & base_fname,
             std::vector<common_adapter_lora_info> & lora_files,
             std::string & outfile,
-            int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
+            int n_threads_) : base_model(base_fname, 0), n_threads(n_threads_), fout(outfile, std::ios::binary) {
         fout.exceptions(std::ofstream::failbit); // fail fast on write errors
 
         if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {

@@ -157,7 +157,7 @@ struct lora_merge_ctx {
         allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
     }
 
-    void check_metadata_lora(file_input * adapter) {
+    void check_metadata_lora(const file_input * adapter) const {
         auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
         if (general_type != "adapter") {
             throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);

@@ -175,7 +175,7 @@ struct lora_merge_ctx {
         }
     }
 
-    ggml_type get_out_tensor_type(struct ggml_tensor * t) {
+    static ggml_type get_out_tensor_type(struct ggml_tensor * t) {
         if (t->type == GGML_TYPE_F32) {
             return GGML_TYPE_F32;
         } else {

examples/gguf-split/gguf-split.cpp

Lines changed: 8 additions & 8 deletions
@@ -204,14 +204,14 @@ struct split_strategy {
     // temporary buffer for reading in tensor data
     std::vector<uint8_t> read_buf;
 
-    split_strategy(const split_params & params,
-            std::ifstream & f_input,
-            struct gguf_context * ctx_gguf,
-            struct ggml_context * ctx_meta) :
-        params(params),
-        f_input(f_input),
-        ctx_gguf(ctx_gguf),
-        ctx_meta(ctx_meta),
+    split_strategy(const split_params & params_,
+            std::ifstream & f_input_,
+            struct gguf_context * ctx_gguf_,
+            struct ggml_context * ctx_meta_) :
+        params(params_),
+        f_input(f_input_),
+        ctx_gguf(ctx_gguf_),
+        ctx_meta(ctx_meta_),
         n_tensors(gguf_get_n_tensors(ctx_gguf)) {
 
         // because we need to know list of tensors for each file in advance, we will build all the ctx_out for all output splits

examples/run/CMakeLists.txt

Lines changed: 7 additions & 2 deletions
@@ -4,6 +4,11 @@ install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_17)
 
-if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-    target_compile_options(${TARGET} PRIVATE -Wno-shadow) # TMP
+# TMP
+if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    target_compile_options(${TARGET} PRIVATE -Wno-shadow)
+
+    if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        target_compile_options(${TARGET} PRIVATE -Wno-shadow-field-in-constructor)
+    endif()
 endif()

examples/server/server.cpp

Lines changed: 1 addition & 1 deletion
@@ -200,7 +200,7 @@ struct server_task {
     // used by SERVER_TASK_TYPE_SET_LORA
     std::vector<common_adapter_lora_info> set_lora;
 
-    server_task(server_task_type type) : type(type) {}
+    server_task(server_task_type type_) : type(type_) {}
 
     static slot_params params_from_json_cmpl(
             const llama_context * ctx,

src/llama-adapter.h

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ struct llama_adapter_lora_weight {
     }
 
     llama_adapter_lora_weight() = default;
-    llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
+    llama_adapter_lora_weight(struct ggml_tensor * a_, struct ggml_tensor * b_) : a(a_), b(b_) {}
 };
 
 struct llama_adapter_lora {

src/llama-arch.cpp

Lines changed: 1 addition & 1 deletion
@@ -1443,7 +1443,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
 };
 
-LLM_KV::LLM_KV(llm_arch arch) : arch(arch) {}
+LLM_KV::LLM_KV(llm_arch arch_) : arch(arch_) {}
 
 std::string LLM_KV::operator()(llm_kv kv) const {
     return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));

src/llama-arch.h

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@ struct LLM_TN_IMPL {
 };
 
 struct LLM_TN {
-    LLM_TN(llm_arch arch) : arch(arch) {}
+    LLM_TN(llm_arch arch_) : arch(arch_) {}
 
     llm_arch arch;
 
src/llama-context.h

Lines changed: 2 additions & 2 deletions
@@ -15,8 +15,8 @@
 #include <set>
 
 struct llama_context {
-    llama_context(const llama_model & model)
-        : model(model)
+    llama_context(const llama_model & model_)
+        : model(model_)
         , t_start_us(model.t_start_us)
         , t_load_us(model.t_load_us) {}
 
