
Commit 39ae015

Fix: Address various compiler warnings

- ggml/src/ggml-cpu/ggml-cpu.c:
  - Fix cast discarding 'const' qualifier.
  - Remove unused variables 'bs' and 'nbw0'.
- src/llama-quant.cpp:
  - Fix comparison of integer expressions of different signedness.
  - Remove unused variables 'thread_src', 'thread_dst_char', and 'total_size_written'.

1 parent: 0e0a92e

File tree: 2 files changed (+7, -7 lines)

ggml/src/ggml-cpu/ggml-cpu.c (3 additions, 3 deletions)

@@ -8718,7 +8718,7 @@ static void ggml_compute_forward_mul_mat_one_chunk(
         // If src1 is already quantized, it must match vec_dot_type_for_src1
         // This path is less common as per todo.txt and might need more robust handling
         GGML_ASSERT(src1->type == vec_dot_type_for_src1);
-        src1_segment_prepared_data = (void *)src1_segment_f32_ptr; // This cast is placeholder, actual pointer would be from src1 data
+        src1_segment_prepared_data = (const void *)src1_segment_f32_ptr; // This cast is placeholder, actual pointer would be from src1 data
     }

     float segment_result = 0.0f;
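This hunk addresses the "cast discards 'const' qualifier" warning: casting a pointer that originates from const data to plain (void *) silently drops the qualifier, while casting to (const void *) preserves it. A minimal sketch of the same warning class, using illustrative names that are not from the ggml sources:

#include <cstdio>

static float sum2(const void * data) {
    const float * f = (const float *)data; // read-only view; const is preserved
    return f[0] + f[1];
}

int main() {
    const float values[2] = {1.0f, 2.0f};
    // (void *)values       -> warning: cast discards 'const' qualifier
    // (const void *)values -> qualifier preserved, no warning
    const void * p = (const void *)values;
    printf("%.1f\n", sum2(p));
    return 0;
}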
@@ -8863,7 +8863,7 @@ UseGgmlGemm1:;
     } else if (src1->type != vec_dot_type) {
         char * wdata = params->wdata;

-        const size_t nbw0 = ggml_type_size(vec_dot_type);
+        // const size_t nbw0 = ggml_type_size(vec_dot_type); // Unused
         const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
         const size_t nbw2 = nbw1*ne11;
         const size_t nbw3 = nbw2*ne12;
@@ -8875,7 +8875,7 @@ UseGgmlGemm1:;
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = 0; i12 < ne12; ++i12) {
                 for (int64_t i11 = 0; i11 < ne11; ++i11) { // Changed loop to cover all of src1
-                    size_t bs = ggml_blck_size(vec_dot_type);
+                    // size_t bs = ggml_blck_size(vec_dot_type); // Unused
                     // Parallelize quantization of src1 rows if multiple threads are available for this part
                     // For simplicity in this change, assuming ith=0, nth=1 for src1 quantization here
                     // or that from_float is thread-safe and handles partitioning if params->ith/nth are used.
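Both hunks above silence -Wunused-variable by commenting the declarations out. A minimal sketch of that option next to the other common fix, an explicit discard (the UNUSED macro here is illustrative; ggml's headers provide a similar GGML_UNUSED helper, though relying on it is an assumption of this sketch, not something this commit uses):

#include <cstddef>

#define UNUSED(x) (void)(x) // explicit "this is intentionally unused" cast

int main() {
    size_t bs = 32;       // suppose this value is no longer read anywhere
    UNUSED(bs);           // option 1: keep the declaration, discard it explicitly
    // size_t nbw0 = 16;  // option 2 (what the commit does): comment it out
    return 0;
}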

src/llama-quant.cpp (4 additions, 4 deletions)
@@ -99,7 +99,7 @@ SmarterQuantConfig load_smarter_quant_config(const std::string & fname) {
                 tensor_info.n_cols_for_permutation = 0; // Reset before goto
                 goto next_tensor_label;
             }
-            for (size_t i = 0; i < tensor_info.n_cols_for_permutation; ++i) {
+            for (size_t i = 0; i < (size_t)tensor_info.n_cols_for_permutation; ++i) {
                 if (!permutation_json[i].is_number_integer()) {
                     LLAMA_LOG_WARN("%s: Invalid type for 'column_permutation[%zu]' for tensor '%s'. Expected integer. Skipping tensor processing.\n", __func__, i, tensor_name.c_str());
                     delete[] tensor_info.column_permutation;
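This hunk addresses -Wsign-compare: the unsigned loop index i was compared against the signed n_cols_for_permutation, triggering "comparison of integer expressions of different signedness". The explicit (size_t) cast is only safe because the count is known to be non-negative at this point; a negative value would wrap to a huge unsigned number. A minimal sketch with a stand-in variable name:

#include <cstdint>
#include <cstdio>

int main() {
    int64_t n_cols = 4; // signed count, assumed already validated as >= 0
    // "i < n_cols" would warn: comparison of integer expressions of
    // different signedness; the cast makes both operands unsigned.
    for (size_t i = 0; i < (size_t)n_cols; ++i) {
        printf("column %zu\n", i);
    }
    return 0;
}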
@@ -287,16 +287,16 @@ static size_t llama_tensor_quantize_impl(
     loc_workers.resize(nthread -1); // nthread-1 worker threads, 1 main thread

     int64_t rows_per_thread = (nrows + nthread - 1) / nthread;
-    size_t total_size_written = 0;
+    // size_t total_size_written = 0; // Unused
     std::mutex size_mutex;

     for (int t = 0; t < nthread; ++t) {
         const int64_t r_start = t * rows_per_thread;
         const int64_t r_end = std::min(r_start + rows_per_thread, nrows);
         if (r_start >= r_end) continue;

-        const float * thread_src = src + r_start * k;
-        char * thread_dst_char = static_cast<char *>(dst);
+        // const float * thread_src = src + r_start * k; // Unused
+        // char * thread_dst_char = static_cast<char *>(dst); // Unused
         // Calculate offset into dst for this thread's rows
         // This requires knowing the size of previously quantized rows by other threads if types vary,
         // or assuming fixed output size per row if type is const for this call.
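For context, the surrounding code splits nrows across nthread workers with a ceiling division, clamps each thread's half-open row range, and skips threads that end up with an empty slice. A self-contained sketch of just that partitioning arithmetic (the printed layout is illustrative, not from llama.cpp):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t nrows   = 10;
    const int     nthread = 4;
    const int64_t rows_per_thread = (nrows + nthread - 1) / nthread; // ceil(nrows/nthread)

    for (int t = 0; t < nthread; ++t) {
        const int64_t r_start = t * rows_per_thread;
        const int64_t r_end   = std::min(r_start + rows_per_thread, nrows);
        if (r_start >= r_end) continue; // trailing threads may get no rows
        printf("thread %d: rows [%lld, %lld)\n", t, (long long)r_start, (long long)r_end);
    }
    return 0;
}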
