Skip to content

Commit 78cf276

Browse files
fix: suppress unused variable warnings in test-adapter.cpp
- Add (void)variable_name; statements to suppress -Werror=unused-variable
- Fixes compilation errors in the ubuntu-cpu-cmake CI build
- The variables are used only inside assert(); when asserts are compiled out (NDEBUG), the compiler sees them as unused

Co-Authored-By: Jaime Mizrachi <[email protected]>
1 parent 3041d99 commit 78cf276

File tree

1 file changed

+14
-0
lines changed

1 file changed

+14
-0
lines changed

tests/test-adapter.cpp

Lines changed: 14 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -252,6 +252,7 @@ static void test_cvec_apply_to() {
252252
ggml_tensor * input_tensor = create_mock_tensor(512);
253253

254254
ggml_tensor * result = cvec.apply_to(nullptr, input_tensor, 0);
255+
(void)result;
255256
assert(result == input_tensor);
256257
std::cout << " ✓ Returns input tensor when no layer tensor available" << std::endl;
257258
}
@@ -443,6 +444,7 @@ static void test_cvec_boundary_conditions() {
443444
ggml_tensor * input_tensor = create_mock_tensor(512);
444445

445446
ggml_tensor * result = cvec.apply_to(nullptr, input_tensor, 0);
447+
(void)result;
446448
assert(result == input_tensor);
447449
std::cout << " ✓ apply_to returns input tensor when cvec uninitialized" << std::endl;
448450
}
@@ -452,6 +454,7 @@ static void test_cvec_boundary_conditions() {
452454
ggml_tensor * input_tensor = create_mock_tensor(512);
453455

454456
ggml_tensor * result = cvec.apply_to(nullptr, input_tensor, 50);
457+
(void)result;
455458
assert(result == input_tensor);
456459
std::cout << " ✓ apply_to returns input tensor for high layer index" << std::endl;
457460
}
@@ -464,6 +467,7 @@ static void test_cvec_apply_functionality() {
464467
llama_adapter_cvec cvec;
465468

466469
bool result = cvec.apply(*(llama_model*)nullptr, nullptr, 0, 0, 0, 0);
470+
(void)result;
467471
assert(result == true);
468472
std::cout << " ✓ apply with nullptr data returns true" << std::endl;
469473
}
@@ -479,6 +483,7 @@ static void test_lora_weight_edge_cases() {
479483
float alpha = 32.0f;
480484
float adapter_scale = 1.0f;
481485
float actual_scale = weight.get_scale(alpha, adapter_scale);
486+
(void)actual_scale;
482487

483488
assert(std::isinf(actual_scale) || std::isnan(actual_scale));
484489
std::cout << " ✓ Division by zero rank handled" << std::endl;
@@ -491,6 +496,7 @@ static void test_lora_weight_edge_cases() {
491496
float alpha = 0.0f;
492497
float adapter_scale = 2.5f;
493498
float actual_scale = weight.get_scale(alpha, adapter_scale);
499+
(void)actual_scale;
494500

495501
assert(actual_scale == adapter_scale);
496502
std::cout << " ✓ Zero alpha defaults to adapter_scale" << std::endl;
@@ -505,6 +511,7 @@ static void test_lora_adapter_advanced() {
505511

506512
ggml_tensor * tensor_with_long_name = create_mock_tensor(1, 1, 1, 1, "very_long_tensor_name_that_exceeds_normal_limits");
507513
llama_adapter_lora_weight * result = adapter.get_weight(tensor_with_long_name);
514+
(void)result;
508515

509516
assert(result == nullptr);
510517
std::cout << " ✓ get_weight handles long tensor names" << std::endl;
@@ -545,11 +552,13 @@ static void test_metadata_advanced() {
545552
char buf[256];
546553
for (int i = 0; i < 3; ++i) {
547554
int32_t result = llama_adapter_meta_key_by_index(&adapter, i, buf, sizeof(buf));
555+
(void)result;
548556
assert(result > 0);
549557
assert(strlen(buf) > 0);
550558
}
551559

552560
int32_t result = llama_adapter_meta_key_by_index(&adapter, 3, buf, sizeof(buf));
561+
(void)result;
553562
assert(result == -1);
554563
std::cout << " ✓ meta_key_by_index boundary testing" << std::endl;
555564
}
@@ -560,6 +569,7 @@ static void test_metadata_advanced() {
560569

561570
char small_buf[10];
562571
int32_t result = llama_adapter_meta_key_by_index(&adapter, 0, small_buf, sizeof(small_buf));
572+
(void)result;
563573

564574
assert(result > 0);
565575
assert(strlen(small_buf) < sizeof(small_buf));
@@ -572,6 +582,7 @@ static void test_metadata_advanced() {
572582

573583
char buf[256];
574584
int32_t result = llama_adapter_meta_val_str(&adapter, "key", buf, sizeof(buf));
585+
(void)result;
575586

576587
assert(result > 0);
577588
assert(strlen(buf) < sizeof(buf));
@@ -589,6 +600,7 @@ static void test_edge_cases() {
589600
float alpha = 1e-10f;
590601
float adapter_scale = 1e-10f;
591602
float actual_scale = weight.get_scale(alpha, adapter_scale);
603+
(void)actual_scale;
592604

593605
assert(std::isfinite(actual_scale));
594606
std::cout << " ✓ Very small floating point values" << std::endl;
@@ -601,6 +613,7 @@ static void test_edge_cases() {
601613
float alpha = 1e6f;
602614
float adapter_scale = 1e6f;
603615
float actual_scale = weight.get_scale(alpha, adapter_scale);
616+
(void)actual_scale;
604617

605618
assert(std::isfinite(actual_scale));
606619
std::cout << " ✓ Large floating point values" << std::endl;
@@ -610,6 +623,7 @@ static void test_edge_cases() {
610623
llama_adapter_cvec cvec;
611624

612625
ggml_tensor * result = cvec.tensor_for(1000000);
626+
(void)result;
613627
assert(result == nullptr);
614628
std::cout << " ✓ Very large layer index" << std::endl;
615629
}

0 commit comments

Comments (0)