Skip to content
Merged
17 changes: 17 additions & 0 deletions cmake/common.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,21 @@ function(llama_add_compile_flags)
set(CXX_FLAGS "" PARENT_SCOPE)
endif()
endif()

if (NOT MSVC)
if (LLAMA_SANITIZE_THREAD)
add_compile_options(-fsanitize=thread)
link_libraries (-fsanitize=thread)
endif()

if (LLAMA_SANITIZE_ADDRESS)
add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
link_libraries (-fsanitize=address)
endif()

if (LLAMA_SANITIZE_UNDEFINED)
add_compile_options(-fsanitize=undefined)
link_libraries (-fsanitize=undefined)
endif()
endif()
endfunction()
2 changes: 2 additions & 0 deletions tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
llama_add_compile_flags()

function(llama_test target)
include(CMakeParseArguments)
set(options)
Expand Down
20 changes: 9 additions & 11 deletions tests/test-gguf.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ enum handcrafted_file_type {
HANDCRAFTED_DATA_CUSTOM_ALIGN = 810 + offset_has_data,
};

std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
static std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
switch (hft) {
case HANDCRAFTED_HEADER_BAD_MAGIC: return "HEADER_BAD_MAGIC";
case HANDCRAFTED_HEADER_BAD_VERSION_1: return "HEADER_BAD_VERSION_1";
Expand Down Expand Up @@ -99,7 +99,7 @@ static bool expect_context_not_null(const enum handcrafted_file_type hft) {

typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t;

std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
static std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Curious — compiler warnings normally pick this up, but not here, it seems.

std::vector<tensor_config_t> tensor_configs;
tensor_configs.reserve(100);

Expand All @@ -122,7 +122,7 @@ std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
return tensor_configs;
}

std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 rng) {
static std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 rng) {
std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
kv_types.reserve(100);

Expand Down Expand Up @@ -626,8 +626,6 @@ static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const u

bool ok = true;

const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;

for (int i = 0; i < int(tensor_configs.size()); ++i) {
const ggml_type type = tensor_configs[i].first;
const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second;
Expand Down Expand Up @@ -866,13 +864,13 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
case GGUF_TYPE_COUNT:
default: {
GGML_ABORT("fatal error");
} break;
}
}
} break;
case GGUF_TYPE_COUNT:
default: {
GGML_ABORT("fatal error");
} break;
}
}
}

Expand Down Expand Up @@ -938,7 +936,7 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
}

if (type == GGUF_TYPE_ARRAY) {
const int arr_n = gguf_get_arr_n(ctx, id);
const size_t arr_n = gguf_get_arr_n(ctx, id);
if (arr_n != gguf_get_arr_n(other, idx_other)) {
ok = false;
continue;
Expand All @@ -953,7 +951,7 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
if (type_arr == GGUF_TYPE_BOOL) {
const int8_t * data = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
if (bool(data[arr_i]) != bool(data_other[arr_i])) {
ok = false;
}
Expand All @@ -962,7 +960,7 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
}

if (type_arr == GGUF_TYPE_STRING) {
for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
const std::string str = gguf_get_arr_str(ctx, id, arr_i);
const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i);
if (str != str_other) {
Expand Down Expand Up @@ -1057,7 +1055,7 @@ static bool same_tensor_data(const struct ggml_context * orig, const struct ggml
ok = false;
}

return true;
return ok;
}

static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
Expand Down
1 change: 0 additions & 1 deletion tests/test-sampling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,6 @@ static void test_penalties(

sampler_tester tester(probs, probs_expected);

const size_t n_vocab = probs.size();
auto * sampler = llama_sampler_init_penalties(last_tokens.size(), repeat_penalty, alpha_frequency, alpha_presence);

for (size_t i = 0; i < last_tokens.size(); i++) {
Expand Down
Loading