
Commit c93131c

Remove --no-bias option

1 parent 3a3d807 commit c93131c

3 files changed: +2 -8 lines changed

include/llama.h

Lines changed: 0 additions & 1 deletion

@@ -365,7 +365,6 @@ extern "C" {
         void * tensor_types;  // pointer to vector containing tensor types
         void * prune_layers;  // pointer to vector containing layer indices to prune
         float target_bpw;     // target bits per weight (bpw)
-        bool no_bias;         // use mean square error estimation only (no aligment bias)
     } llama_model_quantize_params;

     typedef struct llama_logit_bias {
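
For API callers the removal is a struct-layout change only; the quantize entry points are untouched. A minimal sketch of configuring quantization after this commit, assuming the surrounding llama.h API (llama_model_quantize_default_params, llama_model_quantize, the nthread and ftype fields) is as upstream; the file names and parameter values are placeholders:

    #include "llama.h"

    int main(void) {
        // Start from the library defaults so fields this example does not touch
        // (kv_overrides, tensor_types, prune_layers, ...) keep their sane values.
        llama_model_quantize_params params = llama_model_quantize_default_params();

        params.nthread    = 8;                         // worker threads
        params.ftype      = LLAMA_FTYPE_MOSTLY_Q4_K_M; // base quantization type
        params.target_bpw = 4.5f;                      // bpw-driven type selection; -1.0f disables

        // Callers that previously set params.no_bias must simply drop that
        // assignment; the field no longer exists after this commit.

        // Placeholder paths for a real f32/f16 GGUF input and quantized output.
        return llama_model_quantize("model-f32.gguf", "model-quant.gguf", &params) != 0;
    }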

src/llama-quant.cpp

Lines changed: 1 addition & 2 deletions

@@ -2180,8 +2180,7 @@ llama_model_quantize_params llama_model_quantize_default_params() {
         /*.kv_overrides =*/ nullptr,
         /*.tensor_type  =*/ nullptr,
         /*.prune_layers =*/ nullptr,
-        /*.target_bpw   =*/ -1.0f,
-        /*.no_bias      =*/ false
+        /*.target_bpw   =*/ -1.0f
     };

     return result;
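
The trailing comma moves because target_bpw is now the last member: the defaults are a positional aggregate initializer, so any field removed from llama_model_quantize_params must lose its line here too, in declaration order. A small illustration of the hazard with a hypothetical struct (example_params is not a real llama.cpp type); the /*.name =*/ comments document intent but are never checked by the compiler:

    // Hypothetical struct mirroring the pattern above.
    struct example_params {
        void * prune_layers;
        float  target_bpw;
    };

    example_params make_defaults() {
        example_params result = {
            /*.prune_layers =*/ nullptr,   // matched by position, not by name
            /*.target_bpw   =*/ -1.0f      // last member, so no trailing comma
        };
        return result;
    }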

tools/quantize/quantize.cpp

Lines changed: 1 addition & 5 deletions

@@ -118,7 +118,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 [[noreturn]]
 static void usage(const char * executable) {
     printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights]\n", executable);
-    printf("       [--target-bpw n] [--no-bias] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
+    printf("       [--target-bpw n] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n");
     printf("       model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
@@ -134,8 +134,6 @@ static void usage(const char * executable) {
     printf("      Advanced option to remove all tensors from the given layers\n");
     printf("  --target-bpw: target bits per weight (bpw). Must be a positive number between 0.0 and 8.0\n");
     printf("      Advanced option to automatically select quantization types to achieve a total bits per weight (bpw) target\n");
-    printf("  --no-bias: use mean square error estimation only (no aligment bias)\n");
-    printf("      Advanced option use MSE only and disable aligment bias error estimation\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
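
On the command line the flag simply disappears; an invocation that previously appended --no-bias now omits it. An illustrative call, where the model paths, the 4.5 bpw target, the Q4_K_M type, and the thread count are all placeholders:

    ./llama-quantize --target-bpw 4.5 model-f32.gguf model-quant.gguf Q4_K_M 8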
@@ -559,8 +557,6 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !parse_target_bpw(argv[++arg_idx], target_bpw)) {
                 usage(argv[0]);
             }
-        } else if (strcmp(argv[arg_idx], "--no-bias") == 0) {
-            params.no_bias = true;
         } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
             if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) {
                 usage(argv[0]);
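
With the branch removed, a stale --no-bias no longer matches anything in the option loop and falls through to the final else, where usage() prints the help text and exits, so outdated scripts fail fast. A condensed, standalone imitation of that dispatch pattern (most options and all real parsing elided; not the tool's literal code):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    [[noreturn]] static void usage(const char * exe) {
        fprintf(stderr, "usage: %s ...\n", exe);
        exit(1);
    }

    int main(int argc, char ** argv) {
        int arg_idx = 1;
        // Flags are consumed while they start with "--"; positional args follow.
        for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
            if (strcmp(argv[arg_idx], "--target-bpw") == 0) {
                ++arg_idx; // the real tool parses a value here (parse_target_bpw)
            } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) {
                ++arg_idx; // the real tool parses a value here (parse_layer_prune)
            } else {
                usage(argv[0]); // any unknown flag, including a stale --no-bias, ends here
            }
        }
        return 0;
    }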
