@@ -172,7 +172,7 @@ struct Trellis3 {
     }
     IQK_ALWAYS_INLINE inline void next_128(const uint32_t * val, __m256i * result) const {
 #if defined(__AVX512F__) && defined(__AVX512VNNI__) && defined(__AVX512VL__)
-        // On AVX2 we don't have enough vector egisters to do this
+        // On AVX2 we don't have enough vector registers to do this
         __m256i aux[16];
         auto perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
         for (int k = 0; k < 4; ++k) {
@@ -211,7 +211,7 @@ struct Trellis3 {
     }
     IQK_ALWAYS_INLINE inline void next_128(const uint16_t * val, uint32_t v0, __m256i * result) const {
 #if defined(__AVX512F__) && defined(__AVX512VNNI__) && defined(__AVX512VL__)
-        // On AVX2 we don't have enough vector egisters to do this
+        // On AVX2 we don't have enough vector registers to do this
         __m256i aux[16];
         for (int k = 0; k < 4; ++k) {
             auto v128 = _mm_add_epi32(_mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i *)(val + 4*k))), _mm_set1_epi32(v0));
@@ -947,10 +947,6 @@ void mul_mat_iq4_kt_q8_2_x4_T(int n, const void * vx, size_t bx, const DataInfo&
         }
     };
 
-    // auto shift1 = _mm256_setr_epi32(8, 8, 8, 8, 20, 20, 20, 20);
-    // auto shift2 = _mm256_setr_epi32(12, 9, 6, 3, 12, 9, 6, 3);
-    // __m256i values[8];
-
     for (int ix = 0; ix < nrc_x; ++ix) {
         const float * dptr = (const float *)((const char *)vx + ix*bx);
         auto d = _mm256_set1_ps(dptr[0]);
@@ -972,29 +968,9 @@ void mul_mat_iq4_kt_q8_2_x4_T(int n, const void * vx, size_t bx, const DataInfo&
             scales[1] = _mm256_set_m128(scales_h, scales_h);
             o_helper.vec = _mm256_add_epi32(_mm256_slli_epi32(_mm256_and_si256(vshb, _mm256_set1_epi32(1)), 15), _mm256_set1_epi32(4096));
             for (int ib = 0; ib < 4; ++ib) {
-                // Somehow this is slower.
-                // auto idxl = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)(ql + 8*ib)));
-                // auto idxh = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)(ql + 8*ib + 32)));
-                // auto vh = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)(qh + 8*ib)));
-                // idxl = _mm256_or_si256(idxl, _mm256_and_si256(_mm256_slli_epi32(vh, 8), _mm256_set1_epi32(0xf00)));
-                // idxh = _mm256_or_si256(idxh, _mm256_and_si256(_mm256_slli_epi32(vh, 4), _mm256_set1_epi32(0xf00)));
-                // auto shl = _mm256_sllv_epi32(_mm256_srlv_epi32(_mm256_set1_epi32(shb[ib+0]), shift1), shift2);
-                // auto shh = _mm256_sllv_epi32(_mm256_srlv_epi32(_mm256_set1_epi32(shb[ib+4]), shift1), shift2);
-                // idxl = _mm256_or_si256(idxl, _mm256_and_si256(shl, _mm256_set1_epi32(0x7000)));
-                // idxh = _mm256_or_si256(idxh, _mm256_and_si256(shh, _mm256_set1_epi32(0x7000)));
-                // values[ib+0] = _mm256_add_epi32(idxl, _mm256_set1_epi32(o_helper.val[ib+0]));
-                // values[ib+4] = _mm256_add_epi32(idxh, _mm256_set1_epi32(o_helper.val[ib+4]));
                 for (int j = 0; j < 2; ++j) {
                     const uint32_t sh1 = shb[ib+0] >> (8 + 12*j);
                     const uint32_t sh2 = shb[ib+4] >> (8 + 12*j);
-                    // values[8*ib+4*j+ 0] = ql[8*ib+4*j+ 0] + ((qh[8*ib+4*j+0] << 8) & 0xf00) + ((sh1 & 7) << 12) + o_helper.val[ib+0];
-                    // values[8*ib+4*j+ 1] = ql[8*ib+4*j+ 1] + ((qh[8*ib+4*j+1] << 8) & 0xf00) + ((sh1 & 56) << 9) + o_helper.val[ib+0];
-                    // values[8*ib+4*j+ 2] = ql[8*ib+4*j+ 2] + ((qh[8*ib+4*j+2] << 8) & 0xf00) + ((sh1 & 448) << 6) + o_helper.val[ib+0];
-                    // values[8*ib+4*j+ 3] = ql[8*ib+4*j+ 3] + ((qh[8*ib+4*j+3] << 8) & 0xf00) + ((sh1 & 3584) << 3) + o_helper.val[ib+0];
-                    // values[8*ib+4*j+32] = ql[8*ib+4*j+32] + ((qh[8*ib+4*j+0] << 4) & 0xf00) + ((sh2 & 7) << 12) + o_helper.val[ib+4];
-                    // values[8*ib+4*j+33] = ql[8*ib+4*j+33] + ((qh[8*ib+4*j+1] << 4) & 0xf00) + ((sh2 & 56) << 9) + o_helper.val[ib+4];
-                    // values[8*ib+4*j+34] = ql[8*ib+4*j+34] + ((qh[8*ib+4*j+2] << 4) & 0xf00) + ((sh2 & 448) << 6) + o_helper.val[ib+4];
-                    // values[8*ib+4*j+35] = ql[8*ib+4*j+35] + ((qh[8*ib+4*j+3] << 4) & 0xf00) + ((sh2 & 3584) << 3) + o_helper.val[ib+4];
                     values[8*ib+4*j+ 0] = ql[8*ib+4*j+ 0] + ((qh[8*ib+4*j+0] << 8) & 0xf00) + ((sh1 << 12) & 0x7000) + o_helper.val[ib+0];
                     values[8*ib+4*j+ 1] = ql[8*ib+4*j+ 1] + ((qh[8*ib+4*j+1] << 8) & 0xf00) + ((sh1 << 9) & 0x7000) + o_helper.val[ib+0];
                     values[8*ib+4*j+ 2] = ql[8*ib+4*j+ 2] + ((qh[8*ib+4*j+2] << 8) & 0xf00) + ((sh1 << 6) & 0x7000) + o_helper.val[ib+0];
@@ -1007,7 +983,6 @@ void mul_mat_iq4_kt_q8_2_x4_T(int n, const void * vx, size_t bx, const DataInfo&
             }
             for (int i128 = 0; i128 < 2; ++i128) {
                 trellis.next_128(values + 32*i128, xv);
-                // trellis.next_128(values + 4*i128, xv);
                 for (int iy = 0; iy < nrc_y; ++iy) {
                     const block_q8_2_x4& yb = y[iy][2*i+i128];
                     auto dy4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i *)yb.d)), 16));
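
Note on the kept scalar path: each trellis seed in `values` is assembled from three packed bit fields plus a per-block offset. Below is a minimal, hypothetical C++ sketch of that unpacking; the helper name and signature are mine, not part of the commit, and `offset` stands in for `o_helper.val[...]`, i.e. 4096 or 4096 + 32768 depending on the sign bit stored in `shb`.

    #include <cstdint>

    // Hypothetical helper mirroring the scalar lines kept by this change:
    // element n (0..3) of a group takes bits 0..7 from ql, bits 8..11 from one
    // nibble of qh, and bits 12..14 from the per-block shift word sh.
    static inline uint32_t unpack_iq4_kt_index(uint8_t ql, uint8_t qh, bool high_nibble,
                                               uint32_t sh, int n, uint32_t offset) {
        uint32_t idx = ql;                                        // bits 0..7
        idx += ((uint32_t)qh << (high_nibble ? 4 : 8)) & 0xf00;   // bits 8..11
        idx += ((sh >> (3*n)) & 7u) << 12;                        // bits 12..14
        return idx + offset;                                      // 4096 or 36864 base
    }

For example, `values[8*ib+4*j+1]` above equals `unpack_iq4_kt_index(ql[8*ib+4*j+1], qh[8*ib+4*j+1], false, sh1, 1, o_helper.val[ib+0])`, since `(sh1 << 9) & 0x7000` selects the same three bits as `((sh1 >> 3) & 7) << 12`.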