
Commit abb966e

Allow quantization of ffn_gate_inp (ikawrakow#896)

Authored by ikawrakow (Iwan Kawrakow)
Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent 15159a8 commit abb966e

4 files changed: +19 −2 lines changed


examples/quantize/quantize.cpp

Lines changed: 8 additions & 1 deletion
@@ -151,7 +151,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--hide-imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--hide-imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--ffn-gate-inp-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -161,6 +161,7 @@ static void usage(const char * executable) {
     printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
     printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+    printf(" --ffn-gate-inp-type ggml_type: use this ggml_type for the ffn_gate_inp tensors.\n\n");
     printf(" --custom-q regex1=type1,regex2=type2...: use this to specify custom quantization type rules.\n\n");
     printf(" --repack Repack all tensors to the corresponding _r4/8 variant if available.\n\n");
     printf(" --repack-pattern Comma separated list of regexs to use for matching tensor names to be repacked.\n\n");
@@ -375,6 +376,12 @@ int main(int argc, char ** argv) {
         } else {
             usage(argv[0]);
         }
+    } else if (strcmp(argv[arg_idx], "--ffn-gate-inp-type") == 0) {
+        if (arg_idx < argc-1) {
+            params.ffn_gate_inp_type = parse_ggml_type(argv[++arg_idx]);
+        } else {
+            usage(argv[0]);
+        }
     } else if (strcmp(argv[arg_idx], "--attn-q-type") == 0) {
         if (arg_idx < argc-1) {
             params.attn_q_type = parse_ggml_type(argv[++arg_idx]);
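
The new --ffn-gate-inp-type flag is parsed exactly like the other per-tensor overrides and goes through parse_ggml_type, so it accepts the same ggml type names. As an illustration only (model file names are placeholders and the binary name depends on how the examples are built), quantizing an MoE model to Q4_K_M while storing the expert-routing tensors as Q8_0 could look like:

    ./llama-quantize --ffn-gate-inp-type q8_0 moe-model-f16.gguf moe-model-q4_k_m.gguf q4_k_m 8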

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -455,6 +455,7 @@ extern "C" {
         enum ggml_type ffn_gate_type;     // feedforward network gate type
         enum ggml_type ffn_down_type;     // feedforward network down type
         enum ggml_type ffn_up_type;       // feedforward network up type
+        enum ggml_type ffn_gate_inp_type; // routed experts probabilities type (relevant for MoE models only)
         bool allow_requantize;            // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;      // quantize output.weight
         bool only_copy;                   // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
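
For programs that call the C API directly instead of the quantize example, the new field slots into the existing llama_model_quantize_params flow. A minimal sketch, assuming the standard llama_model_quantize entry point and default-params helper (file names and the chosen types are placeholders, not part of the patch):

#include "llama.h"

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype             = LLAMA_FTYPE_MOSTLY_Q4_K_M; // overall quantization mix
    params.ffn_gate_inp_type = GGML_TYPE_Q8_0;            // quantize MoE routing tensors to Q8_0
    // returns 0 on success
    return (int) llama_model_quantize("moe-model-f16.gguf", "moe-model-q4_k_m.gguf", &params);
}

Leaving ffn_gate_inp_type at its default, GGML_TYPE_COUNT, keeps the previous behaviour: the ffn_gate_inp tensors are not quantized at all.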

src/llama-quantize.cpp

Lines changed: 9 additions & 1 deletion
@@ -1245,7 +1245,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // do not quantize expert gating tensors
         // NOTE: can't use LLM_TN here because the layer number is not known
-        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
+        if (name.find("ffn_gate_inp.weight") != std::string::npos) {
+            if (params->ffn_gate_inp_type == GGML_TYPE_COUNT || params->ffn_gate_inp_type == tensor->type) {
+                quantize = false;
+            }
+        }
+        //quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
@@ -1328,6 +1333,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
             new_type = params->output_tensor_type;
         }
+        if (params->ffn_gate_inp_type < GGML_TYPE_COUNT && name.find("ffn_gate_inp.weight") != std::string::npos) {
+            new_type = params->ffn_gate_inp_type;
+        }
         if (params->attn_q_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_q.weight") == 0) {
             new_type = params->attn_q_type;
         }
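
The net effect of the two hunks above: an ffn_gate_inp tensor is now skipped only when no override was requested (GGML_TYPE_COUNT) or when it already has the requested type; otherwise it is quantized, and the later type-selection step forces new_type to the requested value. A condensed sketch of that decision as a standalone helper, for illustration only (the function is hypothetical and not part of the patch):

#include <string>
#include "ggml.h"

// Decide whether an expert-routing tensor should be (re)quantized, and to which type.
static bool pick_ffn_gate_inp_type(const std::string & name, ggml_type current,
                                   ggml_type requested, ggml_type & new_type) {
    if (name.find("ffn_gate_inp.weight") == std::string::npos) {
        return false; // not a routing tensor; handled by the normal rules
    }
    if (requested == GGML_TYPE_COUNT || requested == current) {
        return false; // no override, or already the requested type: leave as-is
    }
    new_type = requested; // quantize to the user-requested type
    return true;
}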

src/llama.cpp

Lines changed: 1 addition & 0 deletions
@@ -3863,6 +3863,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.ffn_gate_type            =*/ GGML_TYPE_COUNT,
         /*.ffn_down_type            =*/ GGML_TYPE_COUNT,
         /*.ffn_up_type              =*/ GGML_TYPE_COUNT,
+        /*.ffn_gate_inp_type        =*/ GGML_TYPE_COUNT,
         /*.allow_requantize         =*/ false,
         /*.quantize_output_tensor   =*/ true,
         /*.only_copy                =*/ false,
