
Commit 9e74f83

Replace --bpw-bias flag with --no-bias
1 parent ab02bb1 commit 9e74f83

3 files changed: 20 additions (+), 42 deletions (-)

include/llama.h

Lines changed: 1 addition & 1 deletion

@@ -365,7 +365,7 @@ extern "C" {
         void * tensor_types;  // pointer to vector containing tensor types
         void * prune_layers;  // pointer to vector containing layer indices to prune
         float target_bpw;     // target bits per weight (bpw)
-        int32_t bpw_bias;     // type of error bias to use: 0 = no bias (MSE only), 1 = fast (default), 2 = precise (slow)
+        bool no_bias;         // use mean square error estimation only (no aligment bias)
     } llama_model_quantize_params;

     typedef struct llama_logit_bias {
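On the API side, the old three-way integer becomes a single boolean, so callers either keep the default bias-aware estimation or switch to MSE-only. A minimal sketch of how a caller might request MSE-only estimation after this commit (file names and the bpw target are illustrative, not part of the change):

    #include "llama.h"

    // Sketch: bpw-targeted quantization with the new no_bias flag set.
    uint32_t quantize_mse_only(void) {
        llama_model_quantize_params qparams = llama_model_quantize_default_params();
        qparams.target_bpw = 4.5f;  // illustrative bpw target
        qparams.no_bias    = true;  // MSE-only error estimation (was bpw_bias = 0)
        return llama_model_quantize("model-f32.gguf", "model-quant.gguf", &qparams);
    }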

src/llama-quant.cpp

Lines changed: 10 additions & 8 deletions

@@ -1153,13 +1153,16 @@ static std::unordered_map<std::string, ggml_type> target_bpw_type(
         // Adjusts the trade-off between systematic bias (introduced by block‑wise scaling) and MSE.
         // Larger values favours quantisation types that produce smaller bias even if the MSE is slightly bigger
         float tensor_lambda = 0.0f;
+        std::vector<float> lambdas;
         const float * values = values_sample.empty() ? nullptr : values_sample.data();
         const float * activations = activations_sample.empty() ? nullptr : activations_sample.data();
-        auto lambdas = estimate_lambda(values, activations, n_per_row, ne2);
-        double acc = 0.0;
-        int ns = 0;
-        for (float l : lambdas) { acc += l; ++ns; }
-        tensor_lambda = ns ? (float)(acc / ns) : 0.0f;
+        if (!params->no_bias) {
+            double acc = 0.0;
+            int ns = 0;
+            lambdas = estimate_lambda(values, activations, n_per_row, ne2);
+            for (float l : lambdas) { acc += l; ++ns; }
+            tensor_lambda = ns ? (float)(acc / ns) : 0.0f;
+        }

         // Evaluate candidates
         std::vector<candidate_types> eval_candidates(compatible_candidates.size());

@@ -1726,8 +1729,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         } else {
             LLAMA_LOG_WARN("%s: imatrix without activations provided, target bpw quantization will be less accurate - ", __func__);
         }
-        const char* msg[] = {"no bias (MSE only)", "fast (default)", "precise (slow)"};
-        LLAMA_LOG_INFO("using %s error estimation\n", msg[params->bpw_bias]);
+        LLAMA_LOG_INFO("using %s error estimation\n", params->no_bias ? "MSE only (no aligment bias)" : "aligment bias (default)");
         LLAMA_LOG_INFO("%s: computing tensor quantization mix to achieve %.4f bpw\n", __func__, params->target_bpw);
         bpw_overrides = target_bpw_type(ml, read_data, model, tensors, mapped, values_data, activations_data, params, nthread);
     } else {

@@ -2038,7 +2040,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.tensor_type =*/ nullptr,
         /*.prune_layers =*/ nullptr,
         /*.target_bpw =*/ -1.0f,
-        /*.bpw_bias =*/ 1
+        /*.no_bias =*/ false
     };

     return result;
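The functional change in target_bpw_type is that the per-expert lambdas are only estimated and averaged when the bias term is wanted; with no_bias set, tensor_lambda stays at 0.0f and candidate ranking is driven by MSE alone. A standalone sketch of that gating, with estimate_lambda's output taken as an input and names simplified (not the exact internal code):

    #include <numeric>
    #include <vector>

    // Skip the bias weight entirely when the caller asked for MSE-only scoring,
    // otherwise average the per-expert lambda estimates.
    static float tensor_lambda_or_zero(const std::vector<float> & lambdas, bool no_bias) {
        if (no_bias || lambdas.empty()) {
            return 0.0f; // the bias term never enters the error score
        }
        const double acc = std::accumulate(lambdas.begin(), lambdas.end(), 0.0);
        return (float)(acc / (double)lambdas.size());
    }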

tools/quantize/quantize.cpp

Lines changed: 9 additions & 33 deletions

@@ -117,12 +117,12 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp

 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable);
-    printf(" [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights]\n", executable);
+    printf(" [--target-bpw n] [--no-bias] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
     printf(" model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
-    printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
-    printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
-    printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
+    printf(" --allow-requantize: allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
+    printf(" --leave-output-tensor: will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
+    printf(" --pure: disable k-quant mixtures and quantize all tensors to the same type\n");
     printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
     printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");

@@ -134,7 +134,8 @@ static void usage(const char * executable) {
     printf(" Advanced option to remove all tensors from the given layers\n");
     printf(" --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 16.0\n");
     printf(" Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
-    printf(" --bpw_bias: type of error bias to use: 0 = no bias (MSE only), 1 = fast (default), 2 = precise (slow)\n");
+    printf(" --no-bias: use mean square error estimation only (no aligment bias)\n");
+    printf(" Advanced option use MSE only and disable aligment bias error estimation\n");
     printf(" --keep-split: will generate quantized model in the same shards as input\n");
     printf(" --override-kv KEY=TYPE:VALUE\n");
     printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");

@@ -496,27 +497,6 @@ static bool parse_target_bpw(const char * data, float & target_bpw) {
     return true;
 }

-static bool parse_bpw_bias(const char * data, int & bpw_bias) {
-    if (!data) {
-        printf("\n%s: error bias type not provided\n\n", __func__);
-        return false;
-    }
-
-    try {
-        bpw_bias = std::stoi(data);
-        if (bpw_bias < 0 || bpw_bias > 2) {
-            printf("\n%s: error bias type must be one of 0 (no bias, MSE only), 1 (fast), or 2 (precise, but slow)\n\n", __func__);
-            return false;
-        }
-    }
-    catch (const std::exception & e) {
-        printf("\n%s: '%s' is not valid. Target bits per weight (bpw) must be a positive number between 0.0 and 16.0\n\n", __func__, data);
-        return false;
-    }
-
-    return true;
-}
-
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);

@@ -531,7 +511,6 @@ int main(int argc, char ** argv) {
     std::vector<tensor_quantization> tensor_types;
     std::vector<int> prune_layers;
     float target_bpw = -1.0f;
-    int bpw_bias = 1;

     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {

@@ -562,11 +541,8 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
                 usage(argv[0]);
             }
-        } else if (strcmp(argv[arg_idx], "--bpw-bias") == 0) {
-            if (arg_idx == argc-1 || !parse_bpw_bias(argv[++arg_idx], bpw_bias)) {
-                usage(argv[0]);
-            }
-            params.bpw_bias = bpw_bias;
+        } else if (strcmp(argv[arg_idx], "--no-bias") == 0) {
+            params.no_bias = true;
         } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
             if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                 usage(argv[0]);
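For command-line users the two-argument form is gone: where the tool previously took --bpw-bias 0|1|2, it now takes a bare --no-bias switch, and omitting it keeps the default bias-aware estimation (so the old --bpw-bias 0 maps to --no-bias, and --bpw-bias 1 maps to passing nothing). Assuming the quantize tool builds to its usual llama-quantize binary, an invocation exercising the new flag might look like this, with the file names and bpw value as placeholders:

    ./llama-quantize --imatrix imatrix.gguf --target-bpw 4.5 --no-bias model-f32.gguf model-quant.gguf Q4_K_M 8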
