Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions ggml/include/ggml.h
Original file line number Diff line number Diff line change
Expand Up @@ -475,6 +475,7 @@ extern "C" {
GGML_TYPE_IQ5_K_R4 = 340,
GGML_TYPE_IQ4_KS_R4 = 344,
GGML_TYPE_IQ5_KS_R4 = 352,
GGML_TYPE_Q8_K_R16 = 397,
GGML_TYPE_Q8_KV_R8 = 398,
GGML_TYPE_Q8_K_R8 = 399,
GGML_TYPE_COUNT,
Expand Down Expand Up @@ -571,6 +572,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ5_K_R4 = 333, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_KS_R4 = 337, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ5_KS_R4 = 341, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_K_R16 = 397, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_KV_R8 = 398, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_K_R8 = 399, // except 1d tensors
};
Expand Down
6 changes: 6 additions & 0 deletions ggml/src/ggml-common.h
Original file line number Diff line number Diff line change
Expand Up @@ -421,6 +421,12 @@ typedef struct {
} block_q8_k_r8;
static_assert(sizeof(block_q8_k_r8) == 8*sizeof(ggml_half) + 8*QK_K, "wrong q8_k_r8 block size/padding");

// Row-interleaved Q8_K variant: 16 rows are packed together, QK_K quants per row per block.
// (Counterpart of block_q8_k_r8 above, with an interleave factor of 16 instead of 8.)
typedef struct {
ggml_half d[16]; // per-row scale (delta): one fp16 value for each of the 16 interleaved rows
int8_t qs[16*QK_K]; // quants; NOTE(review): declared int8_t but described as unsigned — presumably the AVX512 path stores them biased by +128; confirm against the conversion code
} block_q8_k_r16;
static_assert(sizeof(block_q8_k_r16) == 16*sizeof(ggml_half) + 16*QK_K, "wrong q8_k_r16 block size/padding");

// (Almost) "true" 2-bit quantization.
// Due to the need to use blocks as per ggml design, it ends up using
// 2.0625 bpw because of the 16-bit scale for each block of 256.
Expand Down
1 change: 1 addition & 0 deletions ggml/src/ggml-quants.c
Original file line number Diff line number Diff line change
Expand Up @@ -15461,6 +15461,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ5_KS_R4:break;
case GGML_TYPE_Q8_KV_R8: break;
case GGML_TYPE_Q8_K_R8: break;
case GGML_TYPE_Q8_K_R16: break;
case GGML_TYPE_Q8_KV: break;
case GGML_TYPE_BF16_R16: break;
case GGML_TYPE_Q4_0_4_4:
Expand Down
35 changes: 34 additions & 1 deletion ggml/src/ggml.c
Original file line number Diff line number Diff line change
Expand Up @@ -1071,6 +1071,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
[GGML_TYPE_Q8_K_R16] = {
.type_name = "q8_k_r16",
.blck_size = QK_K,
.type_size = sizeof(block_q8_k_r16)/16,
.is_quantized = true,
.to_float = (ggml_to_float_t) dequantize_row_q8_k_r16,
.from_float = quantize_row_q8_k_r16,
.from_float_ref = (ggml_from_float_t) quantize_row_q8_k_r16_ref,
.vec_dot = vec_dot_q8_k_r16_q8_k,
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
.row_meta_size = 0,
},
[GGML_TYPE_IQ2_XXS] = {
.type_name = "iq2_xxs",
.blck_size = QK_K,
Expand Down Expand Up @@ -1934,7 +1947,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
}

static inline int ggml_packed_rows(enum ggml_type type) {
return type == GGML_TYPE_BF16_R16 ? 16
return type == GGML_TYPE_BF16_R16 || type == GGML_TYPE_Q8_K_R16 ? 16
: type == GGML_TYPE_Q8_K_R8 || type == GGML_TYPE_Q8_KV_R8 ||
type == GGML_TYPE_Q8_0_R8 || type == GGML_TYPE_Q4_0_R8 ||
type == GGML_TYPE_IQ4_XS_R8 ? 8
Expand Down Expand Up @@ -4617,6 +4630,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
case GGML_FTYPE_MOSTLY_Q6_K_R4: wtype = GGML_TYPE_Q6_K_R4; break;
case GGML_FTYPE_MOSTLY_Q8_K_R8: wtype = GGML_TYPE_Q8_K_R8; break;
case GGML_FTYPE_MOSTLY_Q8_K_R16: wtype = GGML_TYPE_Q8_K_R16; break;
case GGML_FTYPE_MOSTLY_Q8_KV_R8: wtype = GGML_TYPE_Q8_KV_R8; break;
case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
case GGML_FTYPE_MOSTLY_IQ2_XXS_R4: wtype = GGML_TYPE_IQ2_XXS_R4;break;
Expand Down Expand Up @@ -11542,6 +11556,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -12094,6 +12109,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -12272,6 +12288,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -14966,6 +14983,17 @@ static void ggml_compute_forward_mul_mat(
#endif

#if GGML_USE_IQK_MULMAT
if (ith == 0) {
static bool first_time = true;
if (first_time) {
first_time = false;
#ifdef HAVE_FANCY_SIMD
printf("======================================= HAVE_FANCY_SIMD is defined\n");
#else
printf("======================================= HAVE_FANCY_SIMD is NOT defined\n");
#endif
}
}
if (dst->type == GGML_TYPE_F32) {
if (iqk_mul_mat_4d(ne01, ne11, ne00,
ne02, ne03, ne12, ne13, nb02, nb03, nb12, nb13, nb2/sizeof(float), nb3/sizeof(float),
Expand Down Expand Up @@ -15872,6 +15900,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -16290,6 +16319,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -16614,6 +16644,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XXS_R4:
Expand Down Expand Up @@ -17274,6 +17305,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_Q6_K:
case GGML_TYPE_Q6_K_R4:
case GGML_TYPE_Q8_K_R8:
case GGML_TYPE_Q8_K_R16:
case GGML_TYPE_Q8_KV_R8:
case GGML_TYPE_Q8_KR8:
case GGML_TYPE_IQ2_XXS:
Expand Down Expand Up @@ -24380,6 +24412,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_Q6_K: result = quantize_q6_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q6_K_R4: result = quantize_q6_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q8_K_R8: result = quantize_q8_k_r8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q8_K_R16:result = quantize_q8_k_r16(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q8_KV_R8:result = quantize_q8_KV_r8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_XXS: result = quantize_iq2_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_XXS_R4:result = quantize_iq2_xxs_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
Expand Down
54 changes: 54 additions & 0 deletions ggml/src/iqk/iqk_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -556,6 +556,60 @@ inline void iqk_transpose_8x8(__m256 * m) {
}
}

// Pack one source row (index k, 0 <= k < nr) of a QK_K (= 256-quant) block into the
// row-interleaved q8_k_r{nr} quant layout, rescaling so every quant fits into int8.
//
// Parameters:
//   k      - which of the nr interleaved rows this call fills
//   d0     - reciprocal of the target max quant magnitude; dnew = max|q| * d0
//   qx     - 8 vectors of 32 int8 quants each (the 256 quants of one block)
//   scales - 16 per-16-quant scales, applied to qx after widening to int16
//   block  - caller-provided scratch of 8 uint32 (32 bytes) staging each packed sub-block
//   q8_k   - destination quant area of the interleaved block (row-interleaved layout)
// Returns the extra scale factor dnew that the caller folds into the row's fp16 delta.
template <int nr = 8>
static inline float convert_to_q8_k_r8(int k, float d0, const __m256i * qx, const int16_t * scales, uint32_t * block, int8_t * q8_k) {
auto max_i16 = _mm256_setzero_si256();
__m256i qs[16];
// Widen int8 -> int16, apply the per-16-quant scales, and track max |value| over the block.
for (int ib32 = 0; ib32 < 8; ++ib32) {
qs[2*ib32+0] = _mm256_cvtepi8_epi16(_mm256_castsi256_si128(qx[ib32]));
qs[2*ib32+1] = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(qx[ib32], 1));
qs[2*ib32+0] = _mm256_mullo_epi16(qs[2*ib32+0], _mm256_set1_epi16(scales[2*ib32+0]));
qs[2*ib32+1] = _mm256_mullo_epi16(qs[2*ib32+1], _mm256_set1_epi16(scales[2*ib32+1]));
max_i16 = _mm256_max_epi16(max_i16, _mm256_sign_epi16(qs[2*ib32+0], qs[2*ib32+0]));    // _mm256_sign_epi16(x, x) == |x|
max_i16 = _mm256_max_epi16(max_i16, _mm256_sign_epi16(qs[2*ib32+1], qs[2*ib32+1]));
}
// Horizontal max reduction: 16 int16 lanes -> one scalar float in max4.
auto max_q32 = _mm256_cvtepi16_epi32(_mm_max_epi16(_mm256_castsi256_si128(max_i16), _mm256_extracti128_si256(max_i16, 1)));
auto imax4 = _mm_max_epi32(_mm256_castsi256_si128(max_q32), _mm256_extracti128_si256(max_q32, 1));
auto max4  = _mm_cvtepi32_ps(imax4);
max4 = _mm_max_ps(max4, _mm_movehl_ps(max4, max4));
max4 = _mm_max_ss(max4, _mm_movehdup_ps(max4));
bool needs_scaling = true;
float dnew = _mm_cvtss_f32(max4) * d0;
if (dnew < 1.f) {
// All scaled quants already fit in int8 range: store them unchanged with unit scale.
dnew = 1.f; needs_scaling = false;
}
auto scale = _mm256_set1_ps(std::abs(dnew) > 1e-9f ? 1/dnew : 0.f);
for (int ib32 = 0; ib32 < 8; ++ib32) {
if (needs_scaling) {
// Widen to int32, multiply by 1/dnew in float, round to nearest, and pack back to int8.
auto i0 = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(qs[2*ib32+0]));
auto i1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(qs[2*ib32+0], 1));
auto i2 = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(qs[2*ib32+1]));
auto i3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(qs[2*ib32+1], 1));
i0 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i0)), _MM_ROUND_NEAREST));
i1 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i1)), _MM_ROUND_NEAREST));
i2 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i2)), _MM_ROUND_NEAREST));
i3 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i3)), _MM_ROUND_NEAREST));
i0 = _mm256_packs_epi32(i0, i1);
i2 = _mm256_packs_epi32(i2, i3);
i0 = _mm256_packs_epi16(i0, i2);
// packs works per 128-bit lane; this permute restores sequential byte order.
i0 = _mm256_permutevar8x32_epi32(i0, _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7));
_mm256_storeu_si256((__m256i *)block, i0);
} else {
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 17, 18, 19, 20, 21, 22, 23, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31
auto i0 = _mm256_packs_epi16(qs[2*ib32+0], qs[2*ib32+1]);
auto i0_l = _mm256_castsi256_si128(i0);
auto i0_h = _mm256_extracti128_si256(i0, 1);
_mm_storeu_si128((__m128i *)block+0, _mm_unpacklo_epi64(i0_l, i0_h));
_mm_storeu_si128((__m128i *)block+1, _mm_unpackhi_epi64(i0_l, i0_h));
}
// Scatter the 8 packed dwords into the interleaved destination: dword l of row k
// goes to dst[nr*l + k]. Renamed from 'qs', which shadowed the __m256i qs[16] above.
auto dst = (uint32_t *)q8_k + 8*nr*ib32;
for (int l = 0; l < 8; ++l) {
dst[nr*l + k] = block[l];
}
}
return dnew;
}

#else
// ------------------------------------ __aarch64__ --------------------------------------------------

Expand Down
111 changes: 41 additions & 70 deletions ggml/src/iqk/iqk_gemm_1bit.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1668,82 +1668,34 @@ static void mul_mat_iq2_bn_r4_q8_k16(int n, const void * vx, size_t bx, const Da
}
#endif

inline float convert_to_q8_k_r8(int k, int d0, const __m256i * qx, const int16_t * scales, uint32_t * block, int8_t * q8_k) {
auto max_i16 = _mm256_setzero_si256();
for (int ib32 = 0; ib32 < 8; ++ib32) {
auto q16_l = _mm256_cvtepi8_epi16(_mm256_castsi256_si128(qx[ib32]));
auto q16_h = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(qx[ib32], 1));
q16_l = _mm256_mullo_epi16(q16_l, _mm256_set1_epi16(scales[2*ib32+0]));
q16_h = _mm256_mullo_epi16(q16_h, _mm256_set1_epi16(scales[2*ib32+1]));
max_i16 = _mm256_max_epi16(max_i16, _mm256_sign_epi16(q16_l, q16_l));
max_i16 = _mm256_max_epi16(max_i16, _mm256_sign_epi16(q16_h, q16_h));
}
auto max_q32 = _mm256_cvtepi16_epi32(_mm_max_epi16(_mm256_castsi256_si128(max_i16), _mm256_extracti128_si256(max_i16, 1)));
auto imax4 = _mm_max_epi32(_mm256_castsi256_si128(max_q32), _mm256_extracti128_si256(max_q32, 1));
auto max4 = _mm_cvtepi32_ps(imax4);
max4 = _mm_max_ps(max4, _mm_movehl_ps(max4, max4));
max4 = _mm_max_ss(max4, _mm_movehdup_ps(max4));
bool needs_scaling = true;
float dnew = _mm_cvtss_f32(max4) / d0;
if (dnew < 1.f) {
dnew = 1.f; needs_scaling = false;
}
auto scale = _mm256_set1_ps(std::abs(dnew) > 1e-9f ? 1/dnew : 0.f);
for (int ib32 = 0; ib32 < 8; ++ib32) {
auto q16_l = _mm256_cvtepi8_epi16(_mm256_castsi256_si128(qx[ib32]));
auto q16_h = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(qx[ib32], 1));
q16_l = _mm256_mullo_epi16(q16_l, _mm256_set1_epi16(scales[2*ib32+0]));
q16_h = _mm256_mullo_epi16(q16_h, _mm256_set1_epi16(scales[2*ib32+1]));
if (needs_scaling) {
auto i0 = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(q16_l));
auto i1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(q16_l, 1));
auto i2 = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(q16_h));
auto i3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(q16_h, 1));
i0 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i0)), _MM_ROUND_NEAREST));
i1 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i1)), _MM_ROUND_NEAREST));
i2 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i2)), _MM_ROUND_NEAREST));
i3 = _mm256_cvtps_epi32(_mm256_round_ps(_mm256_mul_ps(scale, _mm256_cvtepi32_ps(i3)), _MM_ROUND_NEAREST));
i0 = _mm256_packs_epi32(i0, i1);
i2 = _mm256_packs_epi32(i2, i3);
i0 = _mm256_packs_epi16(i0, i2);
i0 = _mm256_permutevar8x32_epi32(i0, _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7));
_mm256_storeu_si256((__m256i *)block, i0);
} else {
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 17, 18, 19, 20, 21, 22, 23, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31
auto i0 = _mm256_packs_epi16(q16_l, q16_h);
auto i0_l = _mm256_castsi256_si128(i0);
auto i0_h = _mm256_extracti128_si256(i0, 1);
_mm_storeu_si128((__m128i *)block+0, _mm_unpacklo_epi64(i0_l, i0_h));
_mm_storeu_si128((__m128i *)block+1, _mm_unpackhi_epi64(i0_l, i0_h));
}
auto qs = (uint32_t *)q8_k + 64*ib32;
for (int l = 0; l < 8; ++l) {
qs[8*l + k] = block[l];
}
}
return dnew;
}

void iqk_convert_iq1_s_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
#ifdef HAVE_FANCY_SIMD
constexpr int k_nr = 16;
using block_q8_k_r = block_q8_k_r16;
#else
constexpr int k_nr = 8;
using block_q8_k_r = block_q8_k_r8;
#endif

GGML_ASSERT(n%QK_K == 0);
GGML_ASSERT(nrc_x%8 == 0);
GGML_ASSERT(nrc_x%k_nr == 0);

int nb = n/QK_K;

const block_iq1_s * x8[8];
const block_iq1_s * x8[k_nr];

block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
block_q8_k_r * y = (block_q8_k_r *)vy;

int16_t ls[16];

uint32_t block[8];

__m256i qx[8];

for (int ix = 0; ix < nrc_x; ix += 8) {
for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_s *)((const char *)vx + (ix + k)*bx);
for (int ix = 0; ix < nrc_x; ix += k_nr) {
for (int k = 0; k < k_nr; ++k) x8[k] = (const block_iq1_s *)((const char *)vx + (ix + k)*bx);
for (int i = 0; i < nb; ++i) {
for (int k = 0; k < 8; ++k) {
for (int k = 0; k < k_nr; ++k) {
float d = 0.125f * GGML_FP16_TO_FP32(x8[k][i].d);
auto qs = x8[k][i].qs;
auto qh = x8[k][i].qh;
Expand All @@ -1759,23 +1711,36 @@ void iqk_convert_iq1_s_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
qx[ib32] = value;
qs += 4;
}
float dnew = convert_to_q8_k_r8(k, 126, qx, ls, block, y[i].qs);
float dnew = convert_to_q8_k_r8<k_nr>(k, 1.f/126, qx, ls, block, y[i].qs);
y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
}
#ifdef HAVE_FANCY_SIMD
for (int l = 0; l < 64; ++l) {
auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
_mm512_storeu_si512((__m512i *)y[i].qs + l, v);
}
#endif
}
y += nb;
}
}

void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
#ifdef HAVE_FANCY_SIMD
constexpr int k_nr = 16;
using block_q8_k_r = block_q8_k_r16;
#else
constexpr int k_nr = 8;
using block_q8_k_r = block_q8_k_r8;
#endif
GGML_ASSERT(n%QK_K == 0);
GGML_ASSERT(nrc_x%8 == 0);
GGML_ASSERT(nrc_x%k_nr == 0);

int nb = n/QK_K;

const block_iq1_m * x8[8];
const block_iq1_m * x8[k_nr];

block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
block_q8_k_r * y = (block_q8_k_r *)vy;

int16_t ls[16];

Expand All @@ -1785,10 +1750,10 @@ void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int

auto mask = _mm256_setr_epi32(0x00000008, 0x00000008, 0x00000080, 0x00000080, 0x00080000, 0x00080000, 0x00800000, 0x00800000);

for (int ix = 0; ix < nrc_x; ix += 8) {
for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_m *)((const char *)vx + (ix + k)*bx);
for (int ix = 0; ix < nrc_x; ix += k_nr) {
for (int k = 0; k < k_nr; ++k) x8[k] = (const block_iq1_m *)((const char *)vx + (ix + k)*bx);
for (int i = 0; i < nb; ++i) {
for (int k = 0; k < 8; ++k) {
for (int k = 0; k < k_nr; ++k) {
const uint16_t * sc = (const uint16_t *)x8[k][i].scales;
iq1m_scale_t scale;
scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
Expand Down Expand Up @@ -1816,9 +1781,15 @@ void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
qs += 4;
qh += 2;
}
float dnew = convert_to_q8_k_r8(k, 126, qx, ls, block, y[i].qs);
float dnew = convert_to_q8_k_r8<k_nr>(k, 1.f/126, qx, ls, block, y[i].qs);
y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
}
#ifdef HAVE_FANCY_SIMD
for (int l = 0; l < 64; ++l) {
auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
_mm512_storeu_si512((__m512i *)y[i].qs + l, v);
}
#endif
}
y += nb;
}
Expand Down
Loading