
Commit 028e0cf

ikawrakow and Iwan Kawrakow authored
Add ability to hide imatrix details in llama-quantize (#329)
Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent d210661 commit 028e0cf

1 file changed (+26 lines, -5 lines)

examples/quantize/quantize.cpp

Lines changed: 26 additions & 5 deletions
@@ -142,11 +142,12 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--hide-imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
     printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
+    printf("  --hide-imatrix: do not store imatrix details in the quantized model\n");
     printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
@@ -337,6 +338,8 @@ int main(int argc, char ** argv) {
 
     std::vector<std::string> repack_patterns;
 
+    bool hide_imatrix = false;
+
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
             params.quantize_output_tensor = false;
@@ -429,6 +432,8 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--hide-imatrix") == 0) {
+            hide_imatrix = true;
         } else if (strcmp(argv[arg_idx], "--include-weights") == 0) {
             if (arg_idx < argc-1) {
                 included_weights.emplace_back(argv[++arg_idx]);
@@ -469,15 +474,23 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_FILE);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-            strncpy(kvo.val_str, imatrix_file.c_str(), 127);
+            if (hide_imatrix) {
+                strncpy(kvo.val_str, "top_secret", 127);
+            } else {
+                strncpy(kvo.val_str, imatrix_file.c_str(), 127);
+            }
             kvo.val_str[127] = '\0';
             kv_overrides.emplace_back(std::move(kvo));
         }
         if (!imatrix_dataset.empty()) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_DATASET);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
-            strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
+            if (hide_imatrix) {
+                strncpy(kvo.val_str, "top_secret", 127);
+            } else {
+                strncpy(kvo.val_str, imatrix_dataset.c_str(), 127);
+            }
             kvo.val_str[127] = '\0';
             kv_overrides.emplace_back(std::move(kvo));
         }
@@ -486,15 +499,23 @@ int main(int argc, char ** argv) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_ENTRIES);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-            kvo.val_i64 = imatrix_data.size();
+            if (hide_imatrix) {
+                kvo.val_i64 = 0;
+            } else {
+                kvo.val_i64 = imatrix_data.size();
+            }
             kv_overrides.emplace_back(std::move(kvo));
         }
 
         if (m_last_call > 0) {
             llama_model_kv_override kvo;
             std::strcpy(kvo.key, LLM_KV_QUANTIZE_IMATRIX_N_CHUNKS);
             kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
-            kvo.val_i64 = m_last_call;
+            if (hide_imatrix) {
+                kvo.val_i64 = 0;
+            } else {
+                kvo.val_i64 = m_last_call;
+            }
             kv_overrides.emplace_back(std::move(kvo));
         }
     }
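As a usage sketch (the model, imatrix, and output file names, the Q4_K_M type, and the thread count below are placeholders, not part of the commit), quantizing with an imatrix while keeping its details out of the resulting GGUF metadata could look like:

./llama-quantize --imatrix imatrix.dat --hide-imatrix model-f32.gguf model-q4_k_m.gguf Q4_K_M 8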

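For illustration, here is a minimal, self-contained C++ sketch of the KV-override pattern the change builds on. The struct below is a simplified stand-in for llama_model_kv_override from llama.h (the real one also carries a type tag and a union of value fields), and the key string is illustrative; quantize.cpp uses the LLM_KV_QUANTIZE_IMATRIX_* constants.

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Simplified stand-in for llama_model_kv_override (illustrative only).
struct kv_override_sketch {
    char key[128];
    char val_str[128];
};

// Build one string-valued override, masking the real value when `hide`
// is set -- the same pattern the commit applies to the imatrix file and
// dataset names.
static kv_override_sketch make_str_override(const char * key,
                                            const std::string & value,
                                            bool hide) {
    kv_override_sketch kvo{};
    std::strcpy(kvo.key, key);
    // Copy at most 127 bytes and force a terminating '\0', mirroring the
    // strncpy(..., 127) / val_str[127] = '\0' pattern in quantize.cpp.
    std::strncpy(kvo.val_str, hide ? "top_secret" : value.c_str(), 127);
    kvo.val_str[127] = '\0';
    return kvo;
}

int main() {
    std::vector<kv_override_sketch> kv_overrides;
    // Key name is illustrative, not the exact metadata key used by the tool.
    kv_overrides.push_back(make_str_override("quantize.imatrix.file", "imatrix.dat", /*hide=*/true));
    std::printf("%s = %s\n", kv_overrides[0].key, kv_overrides[0].val_str);
    return 0;
}

Note that both branches are followed by the explicit val_str[127] = '\0', so the stored string is always NUL-terminated even when the source name exceeds 127 bytes.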