@@ -11,6 +11,7 @@
 #include "llama-sampling.h"
 
 #include "unicode.h"
+#include "string.h"
 
 #include "ggml.h"
 #include "ggml-alloc.h"
@@ -1424,8 +1425,8 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { |
 if (arr_type == GGUF_TYPE_STRING) {
     std::string val = gguf_get_arr_str(ctx_gguf, i, j);
     // escape quotes
-    replace_all(val, "\\", "\\\\");
-    replace_all(val, "\"", "\\\"");
+    val = replace_all(val, "\\", "\\\\");
+    val = replace_all(val, "\"", "\\\"");
     ss << '"' << val << '"';
 } else if (arr_type == GGUF_TYPE_ARRAY) {
     ss << "???";
@@ -3563,7 +3564,7 @@ struct llama_model_loader { |
     if (value.size() > MAX_VALUE_LEN) {
         value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
     }
-    replace_all(value, "\n", "\\n");
+    value = replace_all(value, "\n", "\\n");
 
     LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
 }
@@ -16397,14 +16398,14 @@ static void llama_lora_adapter_init_internal(struct llama_model * model, const c |
 for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
     std::string name(cur->name);
     if (str_endswith(name, ".lora_a")) {
-        replace_all(name, ".lora_a", "");
+        name = replace_all(name, ".lora_a", "");
         if (ab_map.find(name) == ab_map.end()) {
             ab_map[name] = llama_lora_weight(cur, nullptr);
         } else {
             ab_map[name].a = cur;
         }
     } else if (str_endswith(name, ".lora_b")) {
-        replace_all(name, ".lora_b", "");
+        name = replace_all(name, ".lora_b", "");
         if (ab_map.find(name) == ab_map.end()) {
             ab_map[name] = llama_lora_weight(nullptr, cur);
         } else {
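The pattern across all the hunks is the same: replace_all now returns the modified string instead of mutating its argument in place, so every call site must assign the result back. Below is a minimal sketch of what such a value-returning helper could look like; the body is illustrative only, not the exact implementation from this commit.

    #include <cstddef>
    #include <string>

    // Illustrative sketch: returns a copy of `s` with every occurrence of
    // `search` replaced by `replace`. Taking `s` by value and returning the
    // result replaces the old mutate-by-reference signature.
    static std::string replace_all(std::string s, const std::string & search, const std::string & replace) {
        if (search.empty()) {
            return s; // guard against looping forever on an empty pattern
        }
        std::string result;
        result.reserve(s.size());
        size_t last = 0;
        size_t pos;
        while ((pos = s.find(search, last)) != std::string::npos) {
            result.append(s, last, pos - last); // copy the unchanged prefix
            result += replace;                  // substitute the match
            last = pos + search.size();         // continue after the match
        }
        result.append(s, last, std::string::npos); // copy the tail
        return result;
    }

With this shape, call sites read exactly as in the hunks above, e.g. `name = replace_all(name, ".lora_a", "");`. One trade-off of the value-returning form is that a caller who forgets the assignment gets a silent no-op instead of a mutation; marking the helper `[[nodiscard]]` would turn that mistake into a compiler warning.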