
Commit 45fae1a

ikawrakow and Iwan Kawrakow authored

Adding IQ2_KL (#602)
* Experiments for 2.6875 bpw quants

  At least according to RMSE, this is significantly better than q2_K, while using only 1/16 more bits per weight (q2_K is 2.625 bpw; 2.625 + 0.0625 = 2.6875).

* iq2_kl: basics

* iq2_kl: CUDA dequantize

* iq2_kl: small improvement in PPL

  Also check the two neighbouring values for the block scale and use the one that minimizes RMSE.

* iq2_kl: MMQ

  Quite good: PP-512(L3-8B) = 8472 t/s.

* iq2_kl: MMVQ

  We get TG-128(L3-8B) = 162 t/s, which means this is not quite as good as it should be, as q2_K at (almost) the same bpw is at 170 t/s.

* iq2_kl: Zen4 GEMM/GEMV

  Not particularly fast. I may need to think about rearranging the bits.

* iq2_kl: better Zen4

* iq2_kl: convert/repack to q8_k_r8 (AVX2)

* iq2_kl: AVX2 GEMM/GEMV

* iq2_kl: WIP NEON

  The compiler started crashing!!!

* iq2_kl: NEON

  Had to work around a compiler crash when using vzip2q_u8 by using vqtbl2q_u8 instead.

* iq2_kl: convert/repack to q8_k_r8 (NEON)

* iq2_kl: Metal dequantize

* iq2_kl: Metal GEMV - pretty slow

* iq2_kl: Metal GEMV - slightly better (40 t/s -> 44.5 t/s)

* iq2_kl: Metal GEMV - slightly better (44.5 t/s -> 46.5 t/s)

* iq2_kl: Metal GEMV - slightly better (46.5 t/s -> 47.2 t/s)

* iq2_kl: slightly better Metal dequantize

  PP-512 goes to 476 t/s up from 466 t/s.

* iq2_kl: slightly better Metal dequantize

  PP-512 goes to 492 t/s up from 476 t/s.

* Add iq2_kl to constants.py

---------

Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent f535304 commit 45fae1a

File tree

24 files changed: +1819 -12 lines changed

examples/quantize-stats/quantize-stats.cpp

Lines changed: 529 additions & 0 deletions (large diff not rendered)

examples/quantize/quantize.cpp

Lines changed: 1 addition & 0 deletions
@@ -76,6 +76,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "IQ2_K_R4", LLAMA_FTYPE_MOSTLY_IQ2_K_R4, "IQ2_K repacked",},
     { "IQ2_KS",   LLAMA_FTYPE_MOSTLY_IQ2_KS,   " 2.1875 bpw non-linear quantization",},
     { "IQ2_KT",   LLAMA_FTYPE_MOSTLY_IQ2_KT,   " 2.125 bpw trellis quantization", },
+    { "IQ2_KL",   LLAMA_FTYPE_MOSTLY_IQ2_KL,   " 2.69 bpw non-linear quantization", },
     { "IQ3_KS",   LLAMA_FTYPE_MOSTLY_IQ3_KS,   " 3.19 bpw non-linear quantization", },
     { "IQ3_K",    LLAMA_FTYPE_MOSTLY_IQ3_K,    " 3.44 bpw non-linear quantization", },
     { "IQ3_K_R4", LLAMA_FTYPE_MOSTLY_IQ3_K_R4, "IQ3_K repacked", },

ggml/include/ggml.h

Lines changed: 2 additions & 0 deletions
@@ -430,6 +430,7 @@ extern "C" {
         GGML_TYPE_IQ3_KT = 154,
         GGML_TYPE_IQ4_KT = 155,
         GGML_TYPE_IQ3_KS = 156,
+        GGML_TYPE_IQ2_KL = 157,

         GGML_TYPE_Q4_0_R8 = 202,
         GGML_TYPE_Q5_0_R4 = 206,
@@ -523,6 +524,7 @@ extern "C" {
         GGML_FTYPE_MOSTLY_IQ3_KT = 143, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ4_KT = 144, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ3_KS = 145, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ2_KL = 146, // except 1d tensors
         //
         GGML_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
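Note the numbering: IQ2_KL takes the next free slot in each enum (type id 157, ftype 146), continuing the contiguous range used by the non-linear quant family, while the repacked _R4/_R8 variants occupy a separate range starting at 202.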

ggml/src/ggml-common.h

Lines changed: 14 additions & 0 deletions
@@ -606,6 +606,14 @@ typedef struct {
 } block_iq2_k;
 static_assert(sizeof(block_iq2_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/32 + QK_K/4, "wrong iq2_k block size/padding");

+typedef struct {
+    uint16_t scales_h;
+    uint8_t  scales_l[QK_K/64];
+    uint8_t  qs[QK_K/4];
+    uint8_t  qh[QK_K/16];
+} block_iq2_kl;
+static_assert(sizeof(block_iq2_kl) == sizeof(uint16_t) + QK_K/64 + QK_K/4 + QK_K/16, "wrong iq2_kl block size/padding");
+
 typedef struct {
     ggml_half d[4];
     uint8_t extra[8];
@@ -2164,6 +2172,12 @@ GGML_TABLE_BEGIN(int8_t, iq2nl_values, 8)
     -31, -13, 1, 17, -26, -8, 6, 22
 GGML_TABLE_END()

+GGML_TABLE_BEGIN(uint16_t, iq2kl_values, 32)
+    0xe9c1, 0x0dc1, 0xc1d8, 0xf6d8, 0x0dd8, 0x2fd8, 0xd8e9, 0xe9e9, 0x01e9, 0x0de9, 0x1ce9, 0xc1f6, 0x01f6, 0x0df6, 0x2ff6, 0xe901,
+    0xf601, 0x0101, 0x0d01, 0x1c01, 0xd80d, 0xe90d, 0xf60d, 0x010d, 0x0d0d, 0xc11c, 0xe91c, 0x011c, 0x1c1c, 0x2f1c, 0xe92f, 0x0d2f,
+GGML_TABLE_END()
+
 GGML_TABLE_BEGIN(int8_t, iq3nl_values, 16)
     -63, -40, -23, -10, 1, 13, 28, 47,
     -59, -36, -19, -6, 5, 17, 32, 51,
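Taken together, these two hunks define the whole format: within a 256-weight super-block, each pair of adjacent weights shares one 5-bit code (a nibble from qs plus one bit from qh) indexing the 32-entry iq2kl_values table, whose uint16_t entries pack two int8_t codebook values; the 6-bit per-32-block scales are split across scales_l and scales_h. A small host-side sketch of the storage accounting and of one table lookup (assuming QK_K = 256 and a little-endian host, as the CUDA code below also does):

#include <cstdint>
#include <cstdio>

constexpr int QK_K = 256;

// First entries of the iq2kl_values table above.
static const uint16_t iq2kl_values[32] = {
    0xe9c1, 0x0dc1, 0xc1d8, 0xf6d8, 0x0dd8, 0x2fd8, 0xd8e9, 0xe9e9,
    0x01e9, 0x0de9, 0x1ce9, 0xc1f6, 0x01f6, 0x0df6, 0x2ff6, 0xe901,
    0xf601, 0x0101, 0x0d01, 0x1c01, 0xd80d, 0xe90d, 0xf60d, 0x010d,
    0x0d0d, 0xc11c, 0xe91c, 0x011c, 0x1c1c, 0x2f1c, 0xe92f, 0x0d2f,
};

int main() {
    // Bytes per super-block, exactly as in the static_assert:
    // 2 (scales_h) + 4 (scales_l) + 64 (qs) + 16 (qh) = 86.
    size_t bytes = sizeof(uint16_t) + QK_K/64 + QK_K/4 + QK_K/16;
    std::printf("bits per weight: %.4f\n", 8.0 * bytes / QK_K); // 86*8/256 = 2.6875

    // One 5-bit code dequantizes to a *pair* of codebook values.
    unsigned code = 6;                                      // 0..31
    auto v = (const int8_t *)&iq2kl_values[code];
    std::printf("code %u -> (%d, %d)\n", code, v[0], v[1]); // 0xd8e9 -> (-23, -40)
    return 0;
}

The per-row fp16 super-scale (read before the block array in the CUDA dequantize below) adds only 16/n_per_row bits on top of the 2.6875.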

ggml/src/ggml-cuda.cu

Lines changed: 1 addition & 0 deletions
@@ -3499,6 +3499,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_TYPE_IQ3_XXS:
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_XS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ3_KS:
         case GGML_TYPE_IQ4_KS:
         case GGML_TYPE_IQ4_KSS:

ggml/src/ggml-cuda/common.cuh

Lines changed: 7 additions & 0 deletions
@@ -599,6 +599,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K> {
     static constexpr int qi = QI4_XS;
 };

+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KL> {
+    static constexpr int qk = QK_K;
+    static constexpr int qr = QR4_XS;
+    static constexpr int qi = QI4_XS;
+};
+
 template<>
 struct ggml_cuda_type_traits<GGML_TYPE_IQ3_KS> {
     static constexpr int qk = QK_K;
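The new specialization simply reuses the IQ4_XS constants (qr = QR4_XS, qi = QI4_XS), just as the neighbouring IQ3_K one does; for the MMVQ machinery these only describe how to step through a QK_K = 256 super-block, and IQ2_KL's nibble-packed qs presumably gives it the same access granularity as a 4-bit type.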

ggml/src/ggml-cuda/convert.cu

Lines changed: 56 additions & 0 deletions
@@ -1333,6 +1333,48 @@ static __global__ void dequantize_block_iq3_k(const void * __restrict__ vx, dst_
     }
 }

+template<typename dst_t>
+static __global__ void dequantize_block_iq2_kl(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+    int64_t ii  = blockIdx.x;
+    int64_t row = (QK_K * ii) / n_per_row;
+    const char * cx = (const char *)vx + row * row_size;
+    float scale = (float)*(const ggml_half *)cx;
+    const block_iq2_kl * x = (const block_iq2_kl *)(cx + sizeof(ggml_half));
+    const int64_t i = ii - (row*n_per_row)/QK_K;
+
+    const int64_t tid = threadIdx.x;
+    const int64_t ib64 = tid/8;
+    const int64_t il = tid%8;
+    dst_t * y = yy + ii*QK_K + 64*ib64 + 4*il;
+    const uint8_t * qs = x[i].qs + 16*ib64 + 2*il;
+    const uint8_t * qh = x[i].qh + 2*il;
+    auto sh = x[i].scales_h >> 4*ib64;
+    const float d1 = scale * (int(((x[i].scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32);
+    const float d2 = scale * (int(((x[i].scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32);
+    if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+        for (int j = 0; j < 2; ++j) {
+            uint8_t h = qh[j] >> 2*ib64;
+            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
+            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >>  4) | ((h & 2) << 3)));
+            y[2*j+ 0] = __float2bfloat16(d1 * val1[0]);
+            y[2*j+ 1] = __float2bfloat16(d1 * val1[1]);
+            y[2*j+32] = __float2bfloat16(d2 * val2[0]);
+            y[2*j+33] = __float2bfloat16(d2 * val2[1]);
+        }
+    } else {
+        for (int j = 0; j < 2; ++j) {
+            uint8_t h = qh[j] >> 2*ib64;
+            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
+            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >>  4) | ((h & 2) << 3)));
+            y[2*j+ 0] = d1 * val1[0];
+            y[2*j+ 1] = d1 * val1[1];
+            y[2*j+32] = d2 * val2[0];
+            y[2*j+33] = d2 * val2[1];
+        }
+    }
+}
+
 template<typename dst_t>
 static __global__ void dequantize_block_iq3_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {

@@ -1618,6 +1660,14 @@ static void dequantize_row_iq3_k_cuda(const void * vx, dst_t * y, const int64_t
     dequantize_block_iq3_k<<<nb, 32, 0, stream>>>(vx, y);
 }

+template<typename dst_t>
+static void dequantize_row_iq2_kl_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows * n_per_row;
+    const int64_t row_size = ggml_row_size(GGML_TYPE_IQ2_KL, n_per_row);
+    const int nb = (k + QK_K - 1) / QK_K;
+    dequantize_block_iq2_kl<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
 template<typename dst_t>
 static void dequantize_row_iq3_ks_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
     const int64_t k = nrows * n_per_row;
@@ -1772,6 +1822,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
             return dequantize_row_iq2_k_cuda<nv_bfloat16>;
         case GGML_TYPE_IQ3_K:
             return dequantize_row_iq3_k_cuda<nv_bfloat16>;
+        case GGML_TYPE_IQ2_KL:
+            return dequantize_row_iq2_kl_cuda<nv_bfloat16>;
         case GGML_TYPE_IQ3_KS:
             return dequantize_row_iq3_ks_cuda<nv_bfloat16>;
         case GGML_TYPE_IQ4_KSS:
@@ -1876,6 +1928,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
             return dequantize_row_iq2_k_cuda;
         case GGML_TYPE_IQ3_K:
             return dequantize_row_iq3_k_cuda;
+        case GGML_TYPE_IQ2_KL:
+            return dequantize_row_iq2_kl_cuda;
         case GGML_TYPE_IQ3_KS:
             return dequantize_row_iq3_ks_cuda;
         case GGML_TYPE_IQ4_K:
@@ -1973,6 +2027,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_iq2_k_cuda;
         case GGML_TYPE_IQ3_K:
             return dequantize_row_iq3_k_cuda;
+        case GGML_TYPE_IQ2_KL:
+            return dequantize_row_iq2_kl_cuda;
         case GGML_TYPE_IQ3_KS:
             return dequantize_row_iq3_ks_cuda;
         case GGML_TYPE_IQ4_K:
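For readers untangling the bit manipulation in dequantize_block_iq2_kl, here is a scalar host-side sketch of the same per-super-block decode (not part of the PR; it assumes the block_iq2_kl struct and iq2kl_values table from ggml-common.h above, QK_K = 256, and a little-endian host):

#include <cstdint>

constexpr int QK_K = 256;

struct block_iq2_kl {                    // mirrors ggml-common.h above
    uint16_t scales_h;
    uint8_t  scales_l[QK_K/64];
    uint8_t  qs[QK_K/4];
    uint8_t  qh[QK_K/16];
};

extern const uint16_t iq2kl_values[32];  // the table from ggml-common.h

// Dequantize one 256-weight super-block into y[0..255].
// row_scale is the per-row fp16 super-scale the kernel reads first.
static void dequant_iq2_kl_block(float row_scale, const block_iq2_kl & b, float * y) {
    for (int ib64 = 0; ib64 < 4; ++ib64) {   // four 64-weight chunks
        uint32_t sh = b.scales_h >> 4*ib64;  // 2 high bits per 32-block scale
        int ls1 = (((b.scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32;
        int ls2 = (((b.scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32;
        float d1 = row_scale * ls1, d2 = row_scale * ls2;
        for (int j = 0; j < 16; ++j) {       // 16 qs bytes per chunk
            uint8_t q = b.qs[16*ib64 + j];
            uint8_t h = b.qh[j] >> 2*ib64;   // 2 bits of each qh byte per chunk
            auto v1 = (const int8_t *)&iq2kl_values[(q & 0xf) | ((h & 1) << 4)];
            auto v2 = (const int8_t *)&iq2kl_values[(q >>  4) | ((h & 2) << 3)];
            y[64*ib64 + 2*j + 0]  = d1 * v1[0];  // low nibbles -> first 32 weights
            y[64*ib64 + 2*j + 1]  = d1 * v1[1];
            y[64*ib64 + 2*j + 32] = d2 * v2[0];  // high nibbles -> second 32 weights
            y[64*ib64 + 2*j + 33] = d2 * v2[1];
        }
    }
}

The CUDA kernel distributes exactly this loop over 32 threads: tid/8 selects the 64-weight chunk and tid%8 selects a pair of qs bytes within it.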

ggml/src/ggml-cuda/iqk_mmvq.cu

Lines changed: 54 additions & 0 deletions
@@ -1016,6 +1016,52 @@ __device__ __forceinline__ void vec_dot_iq3_k_q8_1(

 }

+// TODO
+__device__ __forceinline__ void vec_dot_iq2_kl_q8_1(
+    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
+
+    float d = __half2float(*(const half *)vbq);
+    const block_iq2_kl * bq2 = (const block_iq2_kl *)((const char *)vbq + sizeof(half)) + kbx;
+
+    int iqs = iiqs/4;
+    const int ib64 = iqs/2; // 0...3. 0 works on quants 0...63, 1 on quants 64...127, etc.
+    // Each thread processes 16 quants in each of the 2 32-blocks
+    const int il16 = iqs%2; // 0...1. 0 works on quants 0...15, 1 on quants 16...31 of each 32-block
+
+    const uint16_t * ql = (const uint16_t *)bq2->qs + 8*ib64 + 4*il16;
+    const uint16_t * qh = (const uint16_t *)bq2->qh + 4*il16;
+
+    int32_t aux32;
+    const uint8_t * aux8 = (const uint8_t *)&aux32;
+
+    const int * q8l = (const int *)bq8_1[2*ib64+0].qs + 4*il16;
+    const int * q8h = (const int *)bq8_1[2*ib64+1].qs + 4*il16;
+
+    int sumi1 = 0, sumi2 = 0;
+    int v1, v2;
+    for (int i = 0; i < 2; ++i) {
+        uint32_t vl = ql[2*i+0] | (ql[2*i+1] << 16);
+        uint32_t vh = (qh[2*i+0] | (qh[2*i+1] << 16)) >> 2*ib64;
+
+        aux32 = (vl & 0x0f0f0f0f) | ((vh << 4) & 0x10101010);
+        v1 = iq2kl_values[aux8[0]] | (iq2kl_values[aux8[1]] << 16);
+        v2 = iq2kl_values[aux8[2]] | (iq2kl_values[aux8[3]] << 16);
+        sumi1 = ggml_cuda_dp4a(v1, q8l[2*i+0], ggml_cuda_dp4a(v2, q8l[2*i+1], sumi1));
+
+        aux32 = ((vl >> 4) & 0x0f0f0f0f) | ((vh << 3) & 0x10101010);
+        v1 = iq2kl_values[aux8[0]] | (iq2kl_values[aux8[1]] << 16);
+        v2 = iq2kl_values[aux8[2]] | (iq2kl_values[aux8[3]] << 16);
+        sumi2 = ggml_cuda_dp4a(v1, q8h[2*i+0], ggml_cuda_dp4a(v2, q8h[2*i+1], sumi2));
+    }
+
+    auto sh = bq2->scales_h >> 4*ib64;
+    int ls1 = int(((bq2->scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32;
+    int ls2 = int(((bq2->scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32;
+
+    *result += d * (__low2float(bq8_1[2*ib64+0].ds) * ls1 * sumi1 + __low2float(bq8_1[2*ib64+1].ds) * ls2 * sumi2);
+
+}
+
 __device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
     const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {

@@ -1280,6 +1326,14 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
     iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
 }

+void mul_mat_vec_iq2_kl_q8_1_cuda(
+    const void * vx, const void * vy, float * dst, const char * ids_data,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+    const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ2_KL, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq2_kl_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
 void mul_mat_vec_iq3_ks_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const char * ids_data,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
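The hot loop builds 32-bit words that each pack two codebook pairs (four int8 values) and reduces them against the q8_1 activations with ggml_cuda_dp4a. As a mental model, a scalar equivalent of one dp4a call (a sketch; on NVIDIA hardware ggml_cuda_dp4a lowers to the __dp4a intrinsic):

#include <cstdint>

// c + dot product of the four packed int8 lanes of a and b.
static int32_t dp4a_ref(int32_t a, int32_t b, int32_t c) {
    auto pa = (const int8_t *)&a;
    auto pb = (const int8_t *)&b;
    for (int k = 0; k < 4; ++k) c += int32_t(pa[k]) * int32_t(pb[k]);
    return c;
}

Each loop iteration therefore accumulates 8 weight-times-activation products per dp4a chain, and the per-32-block scales ls1/ls2 are applied once at the end, outside the integer accumulation.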

ggml/src/ggml-cuda/iqk_mmvq.cuh

Lines changed: 5 additions & 0 deletions
@@ -16,6 +16,11 @@ void mul_mat_vec_iq3_k_q8_1_cuda(
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
     const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);

+void mul_mat_vec_iq2_kl_q8_1_cuda(
+    const void * vx, const void * vy, float * dst, const char * ids_data,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+    const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
 void mul_mat_vec_iq3_ks_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const char * ids_data,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,

ggml/src/ggml-cuda/mmq.cu

Lines changed: 4 additions & 0 deletions
@@ -94,6 +94,9 @@ void ggml_cuda_op_mul_mat_q(
         case GGML_TYPE_IQ4_NL:
             mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
             break;
+        case GGML_TYPE_IQ2_KL:
+            mul_mat_q_case<GGML_TYPE_IQ2_KL>(ctx, args, stream);
+            break;
         case GGML_TYPE_IQ3_KS:
             mul_mat_q_case<GGML_TYPE_IQ3_KS>(ctx, args, stream);
             break;
@@ -201,6 +204,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
         case GGML_TYPE_IQ1_S_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_NL:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ3_KS:
         case GGML_TYPE_IQ4_KS:
         case GGML_TYPE_IQ4_KS_R4:
