@@ -3707,7 +3707,7 @@ struct llama_model_loader {
         }
 
         if (param_overrides_p != nullptr) {
-            for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
+            for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
                 kv_overrides.insert({std::string(p->key), *p});
             }
         }
@@ -3875,7 +3875,7 @@ struct llama_model_loader {
             ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
 
             {
-                const int kid = gguf_find_key(meta, "general.file_type");
+                const int kid = gguf_find_key(meta, "general.file_type"); // TODO: use LLM_KV
                 if (kid >= 0) {
                     ftype = (llama_ftype) gguf_get_val_u32(meta, kid);
                 }
@@ -5369,6 +5369,7 @@ static void llm_load_vocab(
         if (merges_keyidx == -1) {
             throw std::runtime_error("cannot find tokenizer merges in model file\n");
         }
+
         const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
         for (int i = 0; i < n_merges; i++) {
             const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
@@ -5407,16 +5408,6 @@ static void llm_load_vocab(
         vocab.special_cls_id  = -1;
         vocab.special_mask_id = -1;
 
-        const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
-        if (add_space_prefix_keyidx != -1) {
-            vocab.tokenizer_add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
-        } // The default value of add_space_prefix is true.
-
-        const int remove_extra_whitespaces_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS).c_str());
-        if (remove_extra_whitespaces_keyidx != -1) {
-            vocab.tokenizer_remove_extra_whitespaces = gguf_get_val_bool(ctx, remove_extra_whitespaces_keyidx);
-        } // The default value of remove_extra_whitespaces is false.
-
         const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
         if (precompiled_charsmap_keyidx != -1) {
             size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
@@ -5553,10 +5544,8 @@ static void llm_load_vocab(
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         }
 
-        const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
-        if (add_space_prefix_keyidx != -1) {
-            vocab.tokenizer_add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
-        }
+        ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false);
+        ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false);
     }
 
     const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
@@ -18288,8 +18277,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
     // copy the KV pairs from the input file
     gguf_set_kv     (ctx_out, ml.meta);
-    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
-    gguf_set_val_u32(ctx_out, "general.file_type", ftype);
+    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
+    gguf_set_val_u32(ctx_out, "general.file_type", ftype); // TODO: use LLM_KV
+
     // Remove split metadata
     gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
     gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
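Aside on the tokenizer hunks above: replacing the manual gguf_find_key/gguf_get_val_bool lookups with ml.get_key(..., false) reads each flag only when the key is present in the GGUF metadata and otherwise leaves the vocab field at its default. Below is a minimal sketch of that optional-read pattern, not the loader's actual implementation: get_key_bool is a hypothetical helper, a plain std::map stands in for the GGUF context, and the literal key strings are illustrative of what LLM_KV_TOKENIZER_ADD_PREFIX / LLM_KV_TOKENIZER_REMOVE_EXTRA_WS are expected to resolve to.

```cpp
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for an optional metadata read: fills `value` only when
// the key exists; with required == false a missing key is not an error and the
// caller's default is kept.
static bool get_key_bool(const std::map<std::string, bool> & meta,
                         const std::string & key, bool & value, bool required) {
    const auto it = meta.find(key);
    if (it == meta.end()) {
        if (required) {
            throw std::runtime_error("key not found in model file: " + key);
        }
        return false; // keep the caller's default
    }
    value = it->second;
    return true;
}

int main() {
    // pretend this model file only stores the add-prefix flag
    const std::map<std::string, bool> meta = {
        { "tokenizer.ggml.add_space_prefix", false },
    };

    bool add_space_prefix         = true;  // default
    bool remove_extra_whitespaces = false; // default

    get_key_bool(meta, "tokenizer.ggml.add_space_prefix", add_space_prefix, /*required =*/ false);
    get_key_bool(meta, "tokenizer.ggml.remove_extra_whitespaces", remove_extra_whitespaces, /*required =*/ false);

    // prints: add_space_prefix=0 remove_extra_whitespaces=0
    std::printf("add_space_prefix=%d remove_extra_whitespaces=%d\n",
                (int) add_space_prefix, (int) remove_extra_whitespaces);
    return 0;
}
```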