
Commit 0608e0d

Iwan Kawrakow authored and Nexesenex committed
IQ2_KL - Experiments for 2.6875 bpw quants
At least according to RMSE, this is significantly better than q2_K while using only 1/16 bpw more (2.6875 vs 2.625 bpw). Squashed history:

- iq2_kl: basics
- iq2_kl: CUDA dequantize
- iq2_kl: small improvement in PPL. Also check the two neighbouring values for the block scale and use the one that minimizes RMSE (sketched below).
- iq2_kl: MMQ. Quite good: PP-512(L3-8B) = 8472 t/s.
- iq2_kl: MMVQ. We get PP-128(L3-8B) = 162 t/s, which is not quite as good as it should be: q2_K, at (almost) the same bpw, is at 170 t/s.
- iq2_kl: Zen4 GEMM/GEMV. Not particularly fast; I may need to think about rearranging the bits.
- iq2_kl: better Zen4
- iq2_kl: convert/repack to q8_k_r8 (AVX2)
- iq2_kl: AVX2 GEMM/GEMV
- iq2_kl: WIP NEON. The compiler started crashing!!!
- iq2_kl: NEON. Had to work around a compiler crash when using vzip2q_u8 by using vqtbl2q_u8 instead.
- iq2_kl: convert/repack to q8_k_r8 (NEON)
- Update constants.py
1 parent eb1b5b1 commit 0608e0d
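The neighbouring-scale refinement mentioned in the message is not part of the hunks shown on this page (the quantizer source is not in this excerpt), so the following is only a minimal sketch of the idea; the helper name is made up, and the quant indices are held fixed for simplicity:

```cpp
// Hypothetical sketch of the "check the two neighbouring values for the block
// scale" step: given a block of weights xb[0..n), their chosen grid values
// qb[0..n) and the row super-scale d, try the candidate 6-bit block scale ls
// as well as ls-1 and ls+1, and keep whichever minimizes squared error (RMSE).
static int refine_block_scale(const float * xb, const int8_t * qb, int n, float d, int ls) {
    int   best_ls  = ls;
    float best_err = -1.f;
    for (int dl = -1; dl <= 1; ++dl) {
        int cand = ls + dl;
        if (cand < -32 || cand > 31) continue;        // signed 6-bit scale range
        float err = 0.f;
        for (int j = 0; j < n; ++j) {
            float diff = xb[j] - d * cand * qb[j];    // reconstruction error
            err += diff * diff;
        }
        if (best_err < 0.f || err < best_err) { best_err = err; best_ls = cand; }
    }
    return best_ls;
}
```

In the actual quantizer the grid indices would presumably be re-picked per candidate scale; the point here is only the ±1 neighbourhood search on the block scale.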

File tree

23 files changed: +1067 -9 lines


ggml/include/ggml.h

Lines changed: 2 additions & 0 deletions
@@ -432,6 +432,7 @@ extern "C" {
    GGML_TYPE_IQ3_KT = 154,
    GGML_TYPE_IQ4_KT = 155,
    GGML_TYPE_IQ3_KS = 156,
+   GGML_TYPE_IQ2_KL = 157,

    GGML_TYPE_IQ3_KS_V1 = 196,

@@ -522,6 +523,7 @@ extern "C" {
    GGML_FTYPE_MOSTLY_IQ3_KT = 143, // except 1d tensors
    GGML_FTYPE_MOSTLY_IQ4_KT = 144, // except 1d tensors
    GGML_FTYPE_MOSTLY_IQ3_KS = 145, // except 1d tensors
+   GGML_FTYPE_MOSTLY_IQ2_KL = 146, // except 1d tensors

    GGML_FTYPE_MOSTLY_IQ3_KS_V1 = 185, // except 1d tensors
    //

ggml/src/ggml-common.h

Lines changed: 14 additions & 0 deletions
@@ -705,6 +705,14 @@ typedef struct {
} block_iq2_k;
static_assert(sizeof(block_iq2_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/32 + QK_K/4, "wrong iq2_k block size/padding");

+typedef struct {
+    uint16_t scales_h;
+    uint8_t  scales_l[QK_K/64];
+    uint8_t  qs[QK_K/4];
+    uint8_t  qh[QK_K/16];
+} block_iq2_kl;
+static_assert(sizeof(block_iq2_kl) == sizeof(uint16_t) + QK_K/64 + QK_K/4 + QK_K/16, "wrong iq2_kl block size/padding");
+
typedef struct {
    ggml_half d[4];
    uint8_t extra[8];
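A quick sanity check on the layout above: it yields exactly the 2.6875 bpw from the commit title. This assumes QK_K == 256 and does not count the per-row ggml_half super-scale stored in front of the blocks (visible in the CUDA dequantize further down):

```cpp
#include <cstdio>

// block_iq2_kl per-block bit budget, QK_K == 256:
//   scales_h 16 bits, scales_l QK_K/64 bytes, qs QK_K/4 bytes, qh QK_K/16 bytes
int main() {
    const int QK_K = 256;
    const int bits = 16 + (QK_K/64)*8 + (QK_K/4)*8 + (QK_K/16)*8; // 16+32+512+128 = 688
    printf("%.4f bpw\n", (double)bits / QK_K);                    // 688/256 = 2.6875
    return 0;
}
```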
@@ -2276,6 +2284,12 @@ GGML_TABLE_BEGIN(int8_t, iq2nl_values, 8)
    -31, -13, 1, 17, -26, -8, 6, 22
GGML_TABLE_END()

+GGML_TABLE_BEGIN(uint16_t, iq2kl_values, 32)
+    0xe9c1, 0x0dc1, 0xc1d8, 0xf6d8, 0x0dd8, 0x2fd8, 0xd8e9, 0xe9e9, 0x01e9, 0x0de9, 0x1ce9, 0xc1f6, 0x01f6, 0x0df6, 0x2ff6, 0xe901,
+    0xf601, 0x0101, 0x0d01, 0x1c01, 0xd80d, 0xe90d, 0xf60d, 0x010d, 0x0d0d, 0xc11c, 0xe91c, 0x011c, 0x1c1c, 0x2f1c, 0xe92f, 0x0d2f,
+GGML_TABLE_END()
+
+
GGML_TABLE_BEGIN(int8_t, iq3nl_values, 16)
    -63, -40, -23, -10, 1, 13, 28, 47,
    -59, -36, -19, -6, 5, 17, 32, 51,
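iq2kl_values is a codebook over weight *pairs*: each uint16_t packs two int8 weights, low byte first, and every byte that occurs is one of the eight iq3nl-style levels (-63, -40, -23, -10, 1, 13, 28, 47), so the 32 entries are a pruned subset of the 64 possible level pairs. A pair is addressed by a 5-bit index built from a 4-bit nibble of qs plus one bit of qh, as the kernels below do via a pointer cast. A standalone decoder sketch (the helper name is made up; little-endian assumed):

```cpp
#include <cstdint>

// Decode one 5-bit iq2_kl index into its two int8 weights.
// `values` is the iq2kl_values table above; on a little-endian target the
// low byte of an entry is the first weight of the pair.
static inline void iq2kl_decode_pair(const uint16_t * values, uint8_t idx, int8_t w[2]) {
    const uint16_t v = values[idx & 31];
    w[0] = (int8_t)(v & 0xff);  // e.g. entry 0 = 0xe9c1 -> w[0] = 0xc1 = -63
    w[1] = (int8_t)(v >> 8);    //                          w[1] = 0xe9 = -23
}
```

Packing pairs is where the bit budget comes from: 5 bits per pair is 2.5 bpw for the quants, and the scales supply the remaining 0.1875 bpw.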

ggml/src/ggml-cpu/ggml-cpu.c

Lines changed: 6 additions & 0 deletions
@@ -804,6 +804,12 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
        .vec_dot_type = GGML_TYPE_Q8_K,
        .nrows = 1,
    },
+   [GGML_TYPE_IQ2_KL] = {
+       .from_float = quantize_row_iq2_kl,
+       .vec_dot = vec_dot_iq2_kl_q8_k,
+       .vec_dot_type = GGML_TYPE_Q8_K,
+       .nrows = 1,
+   },
    [GGML_TYPE_IQ4_K] = {
        .from_float = quantize_row_iq4_k,
        .vec_dot = vec_dot_iq4_k_q8_k,

ggml/src/ggml-cpu/ops.cpp

Lines changed: 7 additions & 0 deletions
@@ -1746,6 +1746,7 @@ void ggml_compute_forward_add(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -2229,6 +2230,7 @@ void ggml_compute_forward_add1(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -2409,6 +2411,7 @@ void ggml_compute_forward_acc(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -5404,6 +5407,7 @@ void ggml_compute_forward_out_prod(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -5925,6 +5929,7 @@ void ggml_compute_forward_set(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -6239,6 +6244,7 @@ void ggml_compute_forward_get_rows(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

@@ -6995,6 +7001,7 @@ void ggml_compute_forward_clamp(
        case GGML_TYPE_TQ2_0:
        case GGML_TYPE_IQ3_KS_V1:
        case GGML_TYPE_IQ3_KS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_Q6_K_R4:
        case GGML_TYPE_Q8_K_R8:
        case GGML_TYPE_Q8_KR8:

ggml/src/ggml-cuda/common.cuh

Lines changed: 7 additions & 0 deletions
@@ -725,6 +725,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K> {
    static constexpr int qi = QI4_XS;
};

+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KL> {
+    static constexpr int qk = QK_K;
+    static constexpr int qr = QR4_XS;
+    static constexpr int qi = QI4_XS;
+};
+
template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ3_KS> {
    static constexpr int qk = QK_K;

ggml/src/ggml-cuda/convert.cu

Lines changed: 59 additions & 3 deletions
@@ -369,7 +369,7 @@ int __device__ __forceinline__ trellis_next_int(uint32_t& val) {

float __device__ __forceinline__ trellis_next(uint32_t& val) {
    constexpr uint32_t ka = 3417055213;
-   constexpr uint32_t kb = 0;
+   // constexpr uint32_t kb = 0;
    constexpr uint32_t kmask = 0x8fff8fff;
    constexpr uint32_t km32 = 0x3b603b60;
    uint32_t s;

@@ -1056,6 +1056,48 @@ static __global__ void dequantize_block_iq3_ks_v1(const void * __restrict__ vx,
    }
}

+template<typename dst_t>
+static __global__ void dequantize_block_iq2_kl(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+    int64_t ii = blockIdx.x;
+    int64_t row = (QK_K * ii) / n_per_row;
+    const char * cx = (const char *)vx + row * row_size;
+    float scale = (float)*(const ggml_half *)cx;
+    const block_iq2_kl * x = (const block_iq2_kl *)(cx + sizeof(ggml_half));
+    const int64_t i = ii - (row*n_per_row)/QK_K;
+
+    const int64_t tid = threadIdx.x;
+    const int64_t ib64 = tid/8;
+    const int64_t il = tid%8;
+    dst_t * y = yy + ii*QK_K + 64*ib64 + 4*il;
+    const uint8_t * qs = x[i].qs + 16*ib64 + 2*il;
+    const uint8_t * qh = x[i].qh + 2*il;
+    auto sh = x[i].scales_h >> 4*ib64;
+    const float d1 = scale * (int(((x[i].scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32);
+    const float d2 = scale * (int(((x[i].scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32);
+    if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+        for (int j = 0; j < 2; ++j) {
+            uint8_t h = qh[j] >> 2*ib64;
+            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
+            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >>  4) | ((h & 2) << 3)));
+            y[2*j+ 0] = __float2bfloat16(d1 * val1[0]);
+            y[2*j+ 1] = __float2bfloat16(d1 * val1[1]);
+            y[2*j+32] = __float2bfloat16(d2 * val2[0]);
+            y[2*j+33] = __float2bfloat16(d2 * val2[1]);
+        }
+    } else {
+        for (int j = 0; j < 2; ++j) {
+            uint8_t h = qh[j] >> 2*ib64;
+            auto val1 = (const int8_t *)(iq2kl_values + ((qs[j] & 0xf) | ((h & 1) << 4)));
+            auto val2 = (const int8_t *)(iq2kl_values + ((qs[j] >>  4) | ((h & 2) << 3)));
+            y[2*j+ 0] = d1 * val1[0];
+            y[2*j+ 1] = d1 * val1[1];
+            y[2*j+32] = d2 * val2[0];
+            y[2*j+33] = d2 * val2[1];
+        }
+    }
+}
+
template<typename dst_t>
static __global__ void dequantize_block_iq3_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
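The d1/d2 computation in this kernel packs a fairly dense scale layout: each 64-weight group ib64 owns two 32-weight blocks whose signed 6-bit scales are split into a 4-bit nibble in scales_l and two high bits in scales_h. An equivalent host-side sketch (hypothetical helper, same bit arithmetic as the kernel above):

```cpp
#include <cstdint>

// Unpack the eight signed 6-bit block scales of one block_iq2_kl,
// mirroring the d1/d2 computation in dequantize_block_iq2_kl (QK_K == 256).
static void iq2kl_unpack_scales(const block_iq2_kl * x, int scales[8]) {
    for (int ib64 = 0; ib64 < 4; ++ib64) {                  // four 64-weight groups
        const int sh = x->scales_h >> 4*ib64;               // 4 high bits per group
        for (int j = 0; j < 2; ++j) {                       // two 32-blocks per group
            const int lo = (x->scales_l[(2*ib64+j)%4] >> 4*(ib64/2)) & 0xf;
            const int hi = j == 0 ? (sh << 4) & 0x30 : (sh << 2) & 0x30;
            scales[2*ib64+j] = (lo | hi) - 32;              // value in [-32, 31]
        }
    }
}
```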

@@ -1668,6 +1710,14 @@ static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t
    dequantize_block_iq2_k<<<nb, 32, 0, stream>>>(vx, y);
}

+template <typename dst_t>
+static void dequantize_row_iq2_kl_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows * n_per_row;
+    const int64_t row_size = ggml_row_size(GGML_TYPE_IQ2_KL, n_per_row);
+    const int nb = (k + QK_K - 1) / QK_K;
+    dequantize_block_iq2_kl<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
template<typename dst_t>
static void dequantize_row_iq3_ks_v1_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
    const int64_t k = nrows * n_per_row;

@@ -1869,6 +1919,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
            return dequantize_row_iq2_ks_cuda<nv_bfloat16>;
        case GGML_TYPE_IQ2_K:
            return dequantize_row_iq2_k_cuda<nv_bfloat16>;
+       case GGML_TYPE_IQ2_KL:
+           return dequantize_row_iq2_kl_cuda<nv_bfloat16>;
        case GGML_TYPE_IQ3_KS_V1:
            return dequantize_row_iq3_ks_v1_cuda<nv_bfloat16>;
        case GGML_TYPE_IQ3_KS:

@@ -1969,10 +2021,12 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
            return dequantize_row_iq4_xs_cuda;
        case GGML_TYPE_IQ2_KS:
            return dequantize_row_iq2_ks_cuda;
-       case GGML_TYPE_IQ3_KS_V1:
-           return dequantize_row_iq3_ks_v1_cuda;
        case GGML_TYPE_IQ2_K:
            return dequantize_row_iq2_k_cuda;
+       case GGML_TYPE_IQ2_KL:
+           return dequantize_row_iq2_kl_cuda;
+       case GGML_TYPE_IQ3_KS_V1:
+           return dequantize_row_iq3_ks_v1_cuda;
        case GGML_TYPE_IQ3_KS:
            return dequantize_row_iq3_ks_cuda;

@@ -2081,6 +2135,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
            return dequantize_row_iq2_k_cuda;
        case GGML_TYPE_IQ3_K:
            return dequantize_row_iq3_k_cuda;
+       case GGML_TYPE_IQ2_KL:
+           return dequantize_row_iq2_kl_cuda;
        case GGML_TYPE_IQ3_KS:
            return dequantize_row_iq3_ks_cuda;
        case GGML_TYPE_IQ4_K:
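With the three dispatch tables above wired up, IQ2_KL dequantizes through the same entry points as the other row-wise types. A hedged host-side usage sketch (the variable names are made up, and the to_fp16_cuda_t signature is assumed to match the dequantize_row_* functions in this file):

```cpp
// Hypothetical call site: dequantize nrows x n_per_row IQ2_KL weights to fp16.
to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(GGML_TYPE_IQ2_KL);
to_fp16(iq2_kl_data, fp16_out, nrows, n_per_row, stream);
```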

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 1 addition & 0 deletions
@@ -3445,6 +3445,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ4_NL:
        case GGML_TYPE_IQ4_XS:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_IQ3_KS:
        case GGML_TYPE_IQ4_KS:
        case GGML_TYPE_IQ4_KSS:

ggml/src/ggml-cuda/iqk_mmvq.cu

Lines changed: 53 additions & 0 deletions
@@ -1079,6 +1079,52 @@ __device__ __forceinline__ void vec_dot_iq3_ks_v1_q8_1(
            __low2float(bq8_1[4*ib128+3].ds) * ((int)aux8[3] - 127) * sumi[3]);
}

+// TODO
+__device__ __forceinline__ void vec_dot_iq2_kl_q8_1(
+    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
+
+    float d = __half2float(*(const half *)vbq);
+    const block_iq2_kl * bq2 = (const block_iq2_kl *)((const char *)vbq + sizeof(half)) + kbx;
+
+    int iqs = iiqs/4;
+    const int ib64 = iqs/2; // 0...3. 0 works on quants 0...63, 1 on quants 64...127, etc.
+    // Each thread processes 16 quants in each of the 2 32-blocks
+    const int il16 = iqs%2; // 0...1. 0 works on quants 0...15, 1 on quants 16...31 of each 32-block
+
+    const uint16_t * ql = (const uint16_t *)bq2->qs + 8*ib64 + 4*il16;
+    const uint16_t * qh = (const uint16_t *)bq2->qh + 4*il16;
+
+    int32_t aux32;
+    const uint8_t * aux8 = (const uint8_t *)&aux32;
+
+    const int * q8l = (const int *)bq8_1[2*ib64+0].qs + 4*il16;
+    const int * q8h = (const int *)bq8_1[2*ib64+1].qs + 4*il16;
+
+    int sumi1 = 0, sumi2 = 0;
+    int v1, v2;
+    for (int i = 0; i < 2; ++i) {
+        uint32_t vl = ql[2*i+0] | (ql[2*i+1] << 16);
+        uint32_t vh = (qh[2*i+0] | (qh[2*i+1] << 16)) >> 2*ib64;
+
+        aux32 = (vl & 0x0f0f0f0f) | ((vh << 4) & 0x10101010);
+        v1 = iq2kl_values[aux8[0]] | (iq2kl_values[aux8[1]] << 16);
+        v2 = iq2kl_values[aux8[2]] | (iq2kl_values[aux8[3]] << 16);
+        sumi1 = ggml_cuda_dp4a(v1, q8l[2*i+0], ggml_cuda_dp4a(v2, q8l[2*i+1], sumi1));
+
+        aux32 = ((vl >> 4) & 0x0f0f0f0f) | ((vh << 3) & 0x10101010);
+        v1 = iq2kl_values[aux8[0]] | (iq2kl_values[aux8[1]] << 16);
+        v2 = iq2kl_values[aux8[2]] | (iq2kl_values[aux8[3]] << 16);
+        sumi2 = ggml_cuda_dp4a(v1, q8h[2*i+0], ggml_cuda_dp4a(v2, q8h[2*i+1], sumi2));
+    }
+
+    auto sh = bq2->scales_h >> 4*ib64;
+    int ls1 = int(((bq2->scales_l[(2*ib64+0)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 4) & 0x30)) - 32;
+    int ls2 = int(((bq2->scales_l[(2*ib64+1)%4] >> 4*(ib64/2)) & 0xf) | ((sh << 2) & 0x30)) - 32;
+
+    *result += d * (__low2float(bq8_1[2*ib64+0].ds) * ls1 * sumi1 + __low2float(bq8_1[2*ib64+1].ds) * ls2 * sumi2);
+}
+
__device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
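The inner loop above is a SIMD-within-int pattern: each table lookup yields a uint16_t pair of weights, two lookups are spliced into one 32-bit int (four int8 lanes), and ggml_cuda_dp4a accumulates four products against the packed q8_1 activations in one instruction. A plain-C++ reference of what dp4a computes (a model only, not the commit's code):

```cpp
#include <cstdint>
#include <cstring>

// dp4a(a, b, c) == c + dot product of the four packed int8 lanes of a and b.
static inline int dp4a_ref(int a, int b, int c) {
    int8_t pa[4], pb[4];
    std::memcpy(pa, &a, 4);  // reinterpret each 32-bit int as 4 x int8
    std::memcpy(pb, &b, 4);
    for (int k = 0; k < 4; ++k) c += pa[k] * pb[k];
    return c;
}
```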

@@ -1268,6 +1314,13 @@ void mul_mat_vec_iq3_ks_v1_q8_1_cuda(
    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ3_KS_V1, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq3_ks_v1_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}

+void mul_mat_vec_iq2_kl_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ2_KL, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq2_kl_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
void mul_mat_vec_iq3_ks_q8_1_cuda(
    const void * vx, const void * vy, float * dst,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {

ggml/src/ggml-cuda/iqk_mmvq.cuh

Lines changed: 4 additions & 0 deletions
@@ -32,6 +32,10 @@ void mul_mat_vec_iq2_ks_q8_1_cuda(
    const void * vx, const void * vy, float * dst,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);

+void mul_mat_vec_iq2_kl_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
+
void mul_mat_vec_iq3_ks_v1_q8_1_cuda(
    const void * vx, const void * vy, float * dst,
    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);

ggml/src/ggml-cuda/mmq.cu

Lines changed: 4 additions & 0 deletions
@@ -98,6 +98,9 @@ void ggml_cuda_op_mul_mat_q(
        case GGML_TYPE_IQ4_NL:
            mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
            break;
+       case GGML_TYPE_IQ2_KL:
+           mul_mat_q_case<GGML_TYPE_IQ2_KL>(ctx, args, stream);
+           break;
        case GGML_TYPE_IQ3_KS:
            mul_mat_q_case<GGML_TYPE_IQ3_KS>(ctx, args, stream);
            break;

@@ -210,6 +213,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
        case GGML_TYPE_IQ1_S_R4:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
+       case GGML_TYPE_IQ2_KL:
        case GGML_TYPE_IQ3_KS:
        case GGML_TYPE_IQ4_KS:
        case GGML_TYPE_IQ4_KS_R4:
