
Commit 23198ce

Create a Custom Quantization Scheme (CQS) FTYPE
And integrate it into the tensor quantization tree.
1 parent bd575f0 commit 23198ce

3 files changed: 45 additions, 14 deletions

examples/quantize/quantize.cpp

Lines changed: 10 additions & 8 deletions
@@ -52,6 +52,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
 { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, +0.0020 ppl @ Mistral-7B", },
 { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
 { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
+{ "CQS", LLAMA_FTYPE_CQS, "Custom Quantization Scheme", },
 // Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
 { "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
 };
@@ -101,10 +102,10 @@ static void usage(const char * executable) {
 printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
 printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
 printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
-printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n\n");
-printf(" Optional specific tensor quantization types to amend the selected quantization strategy type:\n");
-printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
-printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n");
+printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
+printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
+printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS (default is Q2_K):\n");
 printf(" --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
 printf(" --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
 printf(" --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
@@ -118,10 +119,11 @@ static void usage(const char * executable) {
 printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
 printf("Note: --include-weights and --exclude-weights cannot be used together\n");
 printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
-printf("Note: The recommanded type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n");
-printf("Note: Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
-printf("Note: --attn-qkv-type replaces the types attn-q, attn-k, and attn-v on some models.\n");
-printf("Note: Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+printf("Note: The recommanded type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n\n");
+printf("Note for the Custom Quant Scheme FTYPE:\n");
+printf(" Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+printf(" Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
+printf(" attn-qkv-type replaces the types attn-q, attn-k and attn-v on some models.\n");
 //TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and non CAPS writing of the GGML_TYPEs.
 printf("\nAllowed quantization types:\n");
 for (auto & it : QUANT_OPTIONS) {
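
The usage notes above rely on ggml's lowercase type names: legacy quants written as qN_N, K-quants as qN_K, IQ-quants as iqN_xx. As a minimal sketch of how a value passed to one of these flags could be resolved to a ggml_type (the standalone program and the helper name are illustrative assumptions, not code from this commit; quantize.cpp already uses the same name-lookup idea for --output-tensor-type):

// Sketch only: map a lowercase type name such as "q5_K" or "iq3_xxs" to a ggml_type
// by matching against ggml_type_name(); GGML_TYPE_COUNT doubles as the "unset" sentinel
// that the `< GGML_TYPE_COUNT` checks in src/llama.cpp rely on.
#include <cstdio>
#include <cstring>
#include "ggml.h"

static ggml_type parse_type_name(const char * arg) {
    for (int i = 0; i < GGML_TYPE_COUNT; ++i) {
        const ggml_type type = static_cast<ggml_type>(i);
        const char * name = ggml_type_name(type);
        if (name && strcmp(arg, name) == 0) {
            return type;
        }
    }
    return GGML_TYPE_COUNT; // not a known type name
}

int main() {
    const char * examples[] = { "q5_0", "q4_K", "iq3_xxs" };
    for (const char * s : examples) {
        printf("%-8s -> ggml_type %d\n", s, (int) parse_type_name(s));
    }
    return 0;
}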

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -166,6 +166,7 @@ extern "C" {
 LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
 LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
 LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
+LLAMA_FTYPE_CQS = 99, // except 1d tensors

 LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
 };
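
On the API side, the new ftype plugs into the existing llama_model_quantize_params flow. A minimal sketch of requesting the custom scheme programmatically, assuming the per-tensor override fields referenced by the src/llama.cpp hunks below (attn_v_type, ffn_down_type, and so on) are added to llama_model_quantize_params elsewhere in this change; file names are placeholders:

// Sketch only: attn_v_type / ffn_down_type are assumed fields, not shown in this diff.
#include "llama.h"

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype              = LLAMA_FTYPE_CQS;  // custom scheme; Q2_K is the fallback type
    params.output_tensor_type = GGML_TYPE_Q6_K;   // existing field in llama.h
    params.attn_v_type        = GGML_TYPE_Q5_K;   // assumed per-tensor override
    params.ffn_down_type      = GGML_TYPE_Q4_K;   // assumed per-tensor override

    // llama_model_quantize() returns 0 on success.
    return llama_model_quantize("model-f16.gguf", "model-cqs.gguf", &params) == 0 ? 0 : 1;
}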

src/llama.cpp

Lines changed: 34 additions & 6 deletions
@@ -4478,6 +4478,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
 case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
 case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
 case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8";
+case LLAMA_FTYPE_CQS: return "Custom Quantization Scheme";

 default: return "unknown, may not work";
 }
@@ -15381,7 +15382,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 }
 }
 } else if (name.find("attn_v.weight") != std::string::npos) {
-if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_v_type < GGML_TYPE_COUNT) {
+new_type = qs.params->attn_v_type;
+}
+else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
 new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
 }
 else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
@@ -15419,7 +15423,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 }
 ++qs.i_attention_wv;
 } else if (name.find("attn_k.weight") != std::string::npos) {
-if (qs.model.hparams.n_expert == 8) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_k_type < GGML_TYPE_COUNT) {
+new_type = qs.params->attn_k_type;
+}
+else if (qs.model.hparams.n_expert == 8) {
 // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
 // TODO: explore better strategies
 new_type = GGML_TYPE_Q8_0;
@@ -15431,6 +15438,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 new_type = GGML_TYPE_IQ2_S;
 }
 } else if (name.find("attn_q.weight") != std::string::npos) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_q_type < GGML_TYPE_COUNT) {
+new_type = qs.params->attn_q_type;
+}
 if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
 new_type = GGML_TYPE_IQ3_XXS;
 }
@@ -15440,7 +15450,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 } else if (name.find("ffn_down") != std::string::npos) {
 auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
 int i_layer = info.first, n_layer = info.second;
-if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_down_type < GGML_TYPE_COUNT) {
+new_type = qs.params->ffn_down_type;
+}
+else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
 else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
 if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
 }
@@ -15483,7 +15496,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 }
 ++qs.i_ffn_down;
 } else if (name.find("attn_output.weight") != std::string::npos) {
-if (arch != LLM_ARCH_FALCON) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_output_type < GGML_TYPE_COUNT) {
+new_type = qs.params->attn_output_type;
+}
+else if (arch != LLM_ARCH_FALCON) {
 if (qs.model.hparams.n_expert == 8) {
 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
 ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
@@ -15503,6 +15519,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 }
 }
 else if (name.find("attn_qkv.weight") != std::string::npos) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_qkv_type < GGML_TYPE_COUNT) {
+new_type = qs.params->attn_qkv_type;
+}
 if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
 new_type = GGML_TYPE_Q4_K;
 }
@@ -15512,15 +15531,21 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
 else if (name.find("ffn_gate") != std::string::npos) {
 auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
 int i_layer = info.first, n_layer = info.second;
-if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_gate_type < GGML_TYPE_COUNT) {
+new_type = qs.params->ffn_gate_type;
+}
+else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
 new_type = GGML_TYPE_IQ3_XXS;
 }
 ++qs.i_ffn_gate;
 }
 else if (name.find("ffn_up") != std::string::npos) {
 auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
 int i_layer = info.first, n_layer = info.second;
-if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_up_type < GGML_TYPE_COUNT) {
+new_type = qs.params->ffn_up_type;
+}
+else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
 new_type = GGML_TYPE_IQ3_XXS;
 }
 ++qs.i_ffn_up;
@@ -15671,6 +15696,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
 case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break;

+// Custom Quantization Scheme
+case LLAMA_FTYPE_CQS: default_type = GGML_TYPE_Q2_K; break;
+
 default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
 }
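
Taken together, the hunks above repeat one selection rule: when the ftype is LLAMA_FTYPE_CQS and the matching per-tensor parameter is set (any value below GGML_TYPE_COUNT), that type is used; otherwise the pre-existing heuristics run unchanged, with GGML_TYPE_Q2_K as the scheme's default. A compact restatement of that rule, as a sketch rather than code from the commit:

// Sketch only: the per-tensor selection rule used by the CQS branches above.
#include "ggml.h"

// override_type is the user-supplied per-tensor type (GGML_TYPE_COUNT when unset);
// heuristic_type is whatever the existing ftype-specific logic would have chosen.
static ggml_type cqs_pick_type(bool ftype_is_cqs, ggml_type override_type, ggml_type heuristic_type) {
    if (ftype_is_cqs && override_type < GGML_TYPE_COUNT) {
        return override_type; // e.g. set via --attn-v-type
    }
    return heuristic_type;    // fall back to the untouched selection heuristics
}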
