Skip to content

Commit 8b7536b

Browse files
ikawrakow (Iwan Kawrakow) authored
IQ1_S_R4: better 1.5 bpw quants (#185)
* iq1_s_r4: basics - quantize/dequantize * iq1_s_r4: gemm/gemv works on AVX2/Zen4 * Don't forget to make sure we have a multiple of 4 rows per thread * iq1_s_r4: this is better * iq1_s_r4: fix Zen4 after AVX2 changes * iq1_s_r4: NEON gemm/gemv * iq1_s_r4: more bits for shared experts With this mix we arrive at PPL(512) = 9.4140 for Deepseek-Lite using 1.766 bpw for the repeating layers. On the Ryzen-7950X we get PP-512 = 494 t/s and TG-128 = 52 t/s @ 16 threads. * Forgotten counter increment * iq1_s_r4: slightly faster AVX2/Zen4 gemm/gemv * Compiler warnings --------- Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent ecf111a commit 8b7536b

File tree

11 files changed

+1104
-93
lines changed

11 files changed

+1104
-93
lines changed

examples/quantize/quantize.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
2929
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
3030
{ "IQ2_M_R4", LLAMA_FTYPE_MOSTLY_IQ2_M_R4, " 2.7 bpw quantization", },
3131
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
32+
{ "IQ1_S_R4", LLAMA_FTYPE_MOSTLY_IQ1_S_R4, " 1.5 bpw quantization", },
3233
{ "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
3334
{ "IQ1_BN", LLAMA_FTYPE_MOSTLY_IQ1_BN, " 1.62 bpw quantization (Bitnet)", },
3435
{ "IQ2_BN", LLAMA_FTYPE_MOSTLY_IQ2_BN, " 2.00 bpw quantization (Bitnet)", },
@@ -510,6 +511,7 @@ int main(int argc, char ** argv) {
510511
params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS_R4 ||
511512
params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS_R4 ||
512513
params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
514+
params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S_R4 ||
513515
params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M)) {
514516
fprintf(stderr, "\n==========================================================================================================\n");
515517
fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");

ggml/include/ggml.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -427,6 +427,7 @@ extern "C" {
427427
GGML_TYPE_IQ2_XXS_R4= 216,
428428
GGML_TYPE_IQ2_XS_R4 = 217,
429429
GGML_TYPE_IQ3_XXS_R4= 218,
430+
GGML_TYPE_IQ1_S_R4 = 219,
430431
GGML_TYPE_IQ4_NL_R4 = 220,
431432
GGML_TYPE_IQ3_S_R4 = 221,
432433
GGML_TYPE_IQ2_S_R4 = 222,
@@ -510,6 +511,7 @@ extern "C" {
510511
GGML_FTYPE_MOSTLY_IQ2_XXS_R4= 215, // except 1d tensors
511512
GGML_FTYPE_MOSTLY_IQ2_XS_R4 = 216, // except 1d tensors
512513
GGML_FTYPE_MOSTLY_IQ3_XXS_R4= 217, // except 1d tensors
514+
GGML_FTYPE_MOSTLY_IQ1_S_R4 = 218, // except 1d tensors
513515
GGML_FTYPE_MOSTLY_IQ4_NL_R4 = 219, // except 1d tensors
514516
GGML_FTYPE_MOSTLY_IQ3_S_R4 = 220, // except 1d tensors
515517
GGML_FTYPE_MOSTLY_IQ2_S_R4 = 221, // except 1d tensors

ggml/src/ggml-common.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -485,6 +485,12 @@ typedef struct {
485485
} block_iq1_s;
486486
static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding");
487487

488+
typedef struct {
489+
uint8_t qs[16];
490+
uint16_t qh[4];
491+
} block_iq1_s_r4;
492+
static_assert(sizeof(block_iq1_s_r4) == 24, "wrong iq1_s_r4 block size/padding");
493+
488494
// 1.75 bpw
489495
typedef struct {
490496
uint8_t qs[QK_K/8]; // grid index, low 8 bits

ggml/src/ggml-quants.c

Lines changed: 203 additions & 86 deletions
Large diffs are not rendered by default.

ggml/src/ggml-quants.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ void quantize_row_iq4_xs_ref (const float * GGML_RESTRICT x, block_iq4_xs * GGM
4242
void quantize_row_iq3_s_ref (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k);
4343
void quantize_row_iq2_s_ref (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k);
4444
void quantize_row_iq1_bn_ref (const float * GGML_RESTRICT x, block_iq1_bn * GGML_RESTRICT y, int64_t k);
45+
void quantize_row_iq1_s_ref (const float * GGML_RESTRICT x, block_iq1_s * GGML_RESTRICT y, int64_t k);
4546

4647
void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
4748
void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
@@ -66,6 +67,7 @@ void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y,
6667
void quantize_row_iq3_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
6768
void quantize_row_iq2_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
6869
void quantize_row_iq1_bn (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
70+
void quantize_row_iq1_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
6971

7072
// Dequantization
7173
void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
@@ -148,6 +150,9 @@ void iq2xs_free_impl(enum ggml_type type);
148150
void iq3xs_init_impl(int grid_size);
149151
void iq3xs_free_impl(int grid_size);
150152

153+
void iq1s_process_1block(int block_size, const float * xb, const float * weight, int8_t * L,
154+
float * the_scale, uint16_t * the_index, int * the_shift, float * pairs, float * sumx, float * sumw);
155+
151156
#if defined(__ARM_FEATURE_SVE)
152157
extern int ggml_sve_cnt_b;
153158
#endif

ggml/src/ggml.c

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1176,13 +1176,26 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
11761176
.type_size = sizeof(block_iq1_s),
11771177
.is_quantized = true,
11781178
.to_float = (ggml_to_float_t) dequantize_row_iq1_s,
1179-
.from_float = NULL,
1180-
.from_float_ref = NULL,
1179+
.from_float = quantize_row_iq1_s,
1180+
.from_float_ref = (ggml_from_float_t)quantize_row_iq1_s_ref,
11811181
.vec_dot = ggml_vec_dot_iq1_s_q8_K,
11821182
.vec_dot_type = GGML_TYPE_Q8_K,
11831183
.nrows = 1,
11841184
.row_meta_size = 0,
11851185
},
1186+
[GGML_TYPE_IQ1_S_R4] = {
1187+
.type_name = "iq1_s_r4",
1188+
.blck_size = 32,
1189+
.type_size = sizeof(block_iq1_s_r4)/4,
1190+
.is_quantized = true,
1191+
.to_float = (ggml_to_float_t) dequantize_row_iq1_s_r4,
1192+
.from_float = quantize_row_iq1_s_r4,
1193+
.from_float_ref = (ggml_from_float_t)quantize_row_iq1_s_r4_ref,
1194+
.vec_dot = vec_dot_iq1_s_r4_q8_k,
1195+
.vec_dot_type = GGML_TYPE_Q8_1_X4,
1196+
.nrows = 1,
1197+
.row_meta_size = 2,
1198+
},
11861199
[GGML_TYPE_IQ1_M] = {
11871200
.type_name = "iq1_m",
11881201
.blck_size = QK_K,
@@ -4387,6 +4400,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
43874400
case GGML_FTYPE_MOSTLY_IQ3_S_R4: wtype = GGML_TYPE_IQ3_S_R4; break;
43884401
case GGML_FTYPE_MOSTLY_IQ2_S: wtype = GGML_TYPE_IQ2_S; break;
43894402
case GGML_FTYPE_MOSTLY_IQ2_S_R4: wtype = GGML_TYPE_IQ2_S_R4; break;
4403+
case GGML_FTYPE_MOSTLY_IQ1_S_R4: wtype = GGML_TYPE_IQ1_S_R4; break;
43904404
case GGML_FTYPE_MOSTLY_Q4_0_4_4: wtype = GGML_TYPE_Q4_0_4_4; break;
43914405
case GGML_FTYPE_MOSTLY_Q4_0_4_8: wtype = GGML_TYPE_Q4_0_4_8; break;
43924406
case GGML_FTYPE_MOSTLY_Q4_0_8_8: wtype = GGML_TYPE_Q4_0_8_8; break;
@@ -10934,6 +10948,7 @@ static void ggml_compute_forward_add(
1093410948
case GGML_TYPE_IQ3_S_R4:
1093510949
case GGML_TYPE_IQ2_S:
1093610950
case GGML_TYPE_IQ2_S_R4:
10951+
case GGML_TYPE_IQ1_S_R4:
1093710952
case GGML_TYPE_Q4_0_4_4:
1093810953
case GGML_TYPE_Q4_0_4_8:
1093910954
case GGML_TYPE_Q4_0_8_8:
@@ -11402,6 +11417,7 @@ static void ggml_compute_forward_add1(
1140211417
case GGML_TYPE_IQ3_S_R4:
1140311418
case GGML_TYPE_IQ2_S:
1140411419
case GGML_TYPE_IQ2_S_R4:
11420+
case GGML_TYPE_IQ1_S_R4:
1140511421
case GGML_TYPE_Q4_0_4_4:
1140611422
case GGML_TYPE_Q4_0_4_8:
1140711423
case GGML_TYPE_Q4_0_8_8:
@@ -11567,6 +11583,7 @@ static void ggml_compute_forward_acc(
1156711583
case GGML_TYPE_IQ3_S_R4:
1156811584
case GGML_TYPE_IQ2_S:
1156911585
case GGML_TYPE_IQ2_S_R4:
11586+
case GGML_TYPE_IQ1_S_R4:
1157011587
case GGML_TYPE_Q4_0_4_4:
1157111588
case GGML_TYPE_Q4_0_4_8:
1157211589
case GGML_TYPE_Q4_0_8_8:
@@ -14805,6 +14822,7 @@ static void ggml_compute_forward_out_prod(
1480514822
case GGML_TYPE_IQ3_S_R4:
1480614823
case GGML_TYPE_IQ2_S:
1480714824
case GGML_TYPE_IQ2_S_R4:
14825+
case GGML_TYPE_IQ1_S_R4:
1480814826
case GGML_TYPE_Q4_0_4_4:
1480914827
case GGML_TYPE_Q4_0_4_8:
1481014828
case GGML_TYPE_Q4_0_8_8:
@@ -15210,6 +15228,7 @@ static void ggml_compute_forward_set(
1521015228
case GGML_TYPE_IQ3_S_R4:
1521115229
case GGML_TYPE_IQ2_S:
1521215230
case GGML_TYPE_IQ2_S_R4:
15231+
case GGML_TYPE_IQ1_S_R4:
1521315232
case GGML_TYPE_Q4_0_4_4:
1521415233
case GGML_TYPE_Q4_0_4_8:
1521515234
case GGML_TYPE_Q4_0_8_8:
@@ -15509,6 +15528,7 @@ static void ggml_compute_forward_get_rows(
1550915528
case GGML_TYPE_IQ3_S_R4:
1551015529
case GGML_TYPE_IQ2_S:
1551115530
case GGML_TYPE_IQ2_S_R4:
15531+
case GGML_TYPE_IQ1_S_R4:
1551215532
case GGML_TYPE_Q4_0_4_4:
1551315533
case GGML_TYPE_Q4_0_4_8:
1551415534
case GGML_TYPE_Q4_0_8_8:
@@ -16137,6 +16157,7 @@ static void ggml_compute_forward_clamp(
1613716157
case GGML_TYPE_IQ3_S_R4:
1613816158
case GGML_TYPE_IQ2_S:
1613916159
case GGML_TYPE_IQ2_S_R4:
16160+
case GGML_TYPE_IQ1_S_R4:
1614016161
case GGML_TYPE_Q8_K:
1614116162
case GGML_TYPE_Q8_K64:
1614216163
case GGML_TYPE_Q8_K16:
@@ -22893,6 +22914,7 @@ void ggml_quantize_init(enum ggml_type type) {
2289322914
case GGML_TYPE_IQ2_S:
2289422915
case GGML_TYPE_IQ1_S:
2289522916
case GGML_TYPE_IQ1_M: iq2xs_init_impl(type); break;
22917+
case GGML_TYPE_IQ1_S_R4:iq2xs_init_impl(GGML_TYPE_IQ1_S); break;
2289622918
case GGML_TYPE_IQ3_XXS_R4:
2289722919
case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
2289822920
case GGML_TYPE_IQ3_S_R4:
@@ -22975,6 +22997,7 @@ size_t ggml_quantize_chunk(
2297522997
case GGML_TYPE_IQ3_S_R4:result = quantize_iq3_s_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
2297622998
case GGML_TYPE_IQ2_S: result = quantize_iq2_s (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
2297722999
case GGML_TYPE_IQ2_S_R4:result = quantize_iq2_s_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
23000+
case GGML_TYPE_IQ1_S_R4:result = quantize_iq1_s_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
2297823001
case GGML_TYPE_IQ1_S: result = quantize_iq1_s (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
2297923002
case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
2298023003
case GGML_TYPE_IQ1_BN: result = quantize_iq1_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;

0 commit comments

Comments (0)