@@ -94,6 +94,7 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
         case GGML_TYPE_IQ5_KS_R4:
         case GGML_TYPE_IQ6_K:
         case GGML_TYPE_IQ2_KT:
+        case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
             return MMQ_Q8_1_DS_LAYOUT_D4;
         default:
@@ -205,6 +206,7 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml
         case GGML_TYPE_IQ5_K  : return MMQ_DP4A_TXS_Q8_0_16;
         case GGML_TYPE_IQ6_K  : return MMQ_DP4A_TXS_Q8_0_16;
         case GGML_TYPE_IQ2_KT : return MMQ_DP4A_TXS_Q8_0;
+        case GGML_TYPE_IQ3_KT : return MMQ_DP4A_TXS_Q8_0;
         case GGML_TYPE_IQ4_KT : return MMQ_DP4A_TXS_Q8_0;
         default               : return tile_x_sizes{0, 0, 0};
     }
@@ -255,6 +257,7 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
         case GGML_TYPE_IQ5_K  : return MMQ_MMA_TILE_X_K_Q3_K;
         case GGML_TYPE_IQ6_K  : return MMQ_MMA_TILE_X_K_Q3_K;
         case GGML_TYPE_IQ2_KT : return MMQ_MMA_TILE_X_K_Q8_0;
+        case GGML_TYPE_IQ3_KT : return MMQ_MMA_TILE_X_K_Q8_0;
         case GGML_TYPE_IQ4_KT : return MMQ_MMA_TILE_X_K_Q8_0;
         default               : return 0;
     }
@@ -2939,6 +2942,92 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     }
 }
 
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq3_kt(
+    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+    constexpr uint32_t ka = 0xCBAC1FED;
+    constexpr uint32_t km = 0x3f3f3f3f;
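+    // ka is the multiplicative step of the trellis generator and km keeps the low 6 bits of
+    // each byte; presumably the same constants used by the other *_kt tile loaders in this file.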
+
+#ifdef INT8_MMA_AVAILABLE
+    int   * x_qs = (int   *) x_tile;
+    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
+    int   * x_qs = (int   *) x_tile;
+    float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+    const int kqsx = threadIdx.x;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+        int i = i0 + threadIdx.y;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const block_iq3_kt * bxi = (const block_iq3_kt *)(x + i*stride + sizeof(float)) + kbx0;
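+        // The quants start one float past the row start; that leading float is the per-row
+        // scale read in the second loop below.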
+
+        int ib32 = kqsx/4;
+        int j    = kqsx%4;
+        const auto ql = (const uint16_t *)bxi->ql;
+        const auto qh = (const uint32_t *)bxi->qh;
+        uint32_t mask = 0x01010101 << ib32;
+        uint32_t val  = ql[4*ib32+j] + 4096;
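+        // Each 16-bit ql entry, offset by 4096, seeds the generator for 8 consecutive values.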
+        int2 v = {0, 0};
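+        // One generator step per value: dp4a sums the four 6-bit bytes with a -126 bias and
+        // abs() keeps the magnitude; signs are applied afterwards from the qh bits.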
+        for (int k = 0; k < 4; ++k) {
+            val *= ka;
+            v.x |= std::abs(ggml_cuda_dp4a(val & km, 0x01010101, -126)) << 8*k;
+        }
+        auto signs = __vcmpne4(qh[2*j+0] & mask, 0);
+        v.x = __vsub4(v.x ^ signs, signs);
+        for (int k = 0; k < 4; ++k) {
+            val *= ka;
+            v.y |= std::abs(ggml_cuda_dp4a(val & km, 0x01010101, -126)) << 8*k;
+        }
+        signs = __vcmpne4(qh[2*j+1] & mask, 0);
+        v.y = __vsub4(v.y ^ signs, signs);
+#ifdef INT8_MMA_AVAILABLE
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 0] = v.x;
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 1] = v.y;
+#else
+        x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 0] = v.x;
+        x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 1] = v.y;
+#endif // INT8_MMA_AVAILABLE
+    }
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
+        int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
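+        // Each warp covers 4 rows here; threadIdx.x % 8 below selects one of the 8 block scales.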
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const float * dptr = (const float *)(x + i*stride);
+        const float d = dptr[0] * 1.01f;
+        const block_iq3_kt * bxi = (const block_iq3_kt *)(dptr + 1) + kbx0;
+        int ib32 = threadIdx.x % 8;
+        const int ls = (bxi->scales[ib32%4] >> 4*(ib32/4)) & 0xf;
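+        // 4-bit block scales, packed two per byte in bxi->scales.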
+
+#ifdef INT8_MMA_AVAILABLE
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * ls;
+#else
+        x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = d * ls;
+#endif // INT8_MMA_AVAILABLE
+    }
+}
+
 template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq5_ks_r4(
     const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
 
@@ -3545,6 +3625,13 @@ struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ2_KT> {
     static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
 };
 
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ3_KT> {
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq3_kt<mmq_y, nwarps, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
 template <int mmq_x, int mmq_y, int nwarps, bool need_check>
 struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ5_KS> {
     static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq5_ks<mmq_y, nwarps, need_check>;
@@ -4008,6 +4095,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_IQ6_K);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ1_S_R4);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KT);
 extern DECL_MMQ_CASE(GGML_TYPE_IQ2_KT);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ3_KT);
 
 // -------------------------------------------------------------------------------------------------------------------------
 