|
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#include "ggml-impl.h"
#include "common.hpp"
#include "dequantize.hpp"
#include "getrows.hpp"

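// Gathers rows from a quantized src0 according to the int32 row indices in
// src1. Each work-item dequantizes one adjacent pair of values (the
// dequantize kernel fills a dfloat2), so the x dimension of the grid covers
// ne00/2 columns, while the y/z dimensions walk the src1 index tensor and
// the batch dimensions.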
template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void k_get_rows(
        const void * src0, const int32_t * src1, dst_t * dst,
        int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
        /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
        /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
        /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
        size_t s10, size_t s11, size_t s12,
        const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                     item_ct1.get_local_id(2)) *
                    2;
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) /
                    ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) %
                    ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03;

    const int ib = i00/qk;          // block index
    const int iqs = (i00%qk)/qr;    // quant index
    const int iybs = i00 - i00%qk;  // dst block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel(src0_row, ib, iqs, v);

    dst_row[iybs + iqs + 0] = v.x();
    dst_row[iybs + iqs + y_offset] = v.y();
}

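// Variant of k_get_rows for the reordered Q4_0 layout, in which the quantized
// nibbles for all blocks are stored contiguously and the per-block scales
// follow as a separate array (src0_dq). Row data is therefore addressed via a
// flat row-major element offset (i01 * ne00 + i00) rather than through ggml's
// nb* byte strides, which are unused here.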
template<int qk, int qr, dequantize_kernel_t_reorder dequantize_kernel_reorder, typename dst_t>
static void k_get_rows_reorder(
        const void * src0, const void * src0_dq, const int32_t * src1, dst_t * dst,
        int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
        /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
        /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
        /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
        size_t s10, size_t s11, size_t s12,
        const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                     item_ct1.get_local_id(2)) *
                    2;
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) /
                    ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) %
                    ne12;

    if (i00 >= ne00) {
        return;
    }

    const int64_t ncols = ne00;
    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;

    const int src0_off = i01 * ncols + i00;
    const int ib = src0_off / QK4_0;  // block index
    const int iqs = (i00%qk)/qr;      // x quant index
    const int iybs = i00 - i00%qk;    // dst block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel_reorder((const void *)src0_dq, ib, (const void *)src0, src0_off/2, v);

    dst_row[iybs + iqs + 0] = v.x();
    dst_row[iybs + iqs + y_offset] = v.y();

    GGML_UNUSED(nb01);
    GGML_UNUSED(nb02);
    GGML_UNUSED(nb03);
}

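// Non-quantized path: src0 rows hold plain float/half values, so each
// work-item copies a single element and no dequantization or pairing is
// needed.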
template<typename src0_t, typename dst_t>
static void k_get_rows_float(
        const src0_t * src0, const int32_t * src1, dst_t * dst,
        int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
        /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
        /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
        /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
        size_t s10, size_t s11, size_t s12,
        const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {

    const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                    item_ct1.get_local_id(2);
    const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
                    item_ct1.get_local_id(1);
    const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) /
                    ne12;
    const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
                     item_ct1.get_local_id(0)) %
                    ne12;

    if (i00 >= ne00) {
        return;
    }

    const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

    dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
    const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);

    dst_row[i00] = src0_row[i00];
}

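// Host-side launcher for the quantized path. Grid layout: x covers ne00/2
// column pairs (each work-item writes two values, hence the ne00 % 2 == 0
// assert), y covers the ne10 gathered rows, z covers the ne11*ne12 batch.
// The ggml byte strides (nb*) are converted to element strides before the
// kernel is enqueued.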
template <int qk, int qr, dequantize_kernel_t dq>
static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                          ggml_tensor *dst, const void *src0_dd,
                          const int32_t *src1_dd, float *dst_dd,
                          queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    GGML_ASSERT(ne00 % 2 == 0);

    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) {
                             k_get_rows<qk, qr, dq>(
                                 src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
                         });

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}

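// Launcher for the reordered Q4_0 layout: the device buffer holds
// nrows*ncols/2 bytes of packed nibbles followed by the per-block sycl::half
// scales, so the start of the scale array (src0_dq) is computed here and
// passed to the kernel alongside the quantized data.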
template <int qk, int qr, dequantize_kernel_t_reorder dq_reorder>
static void get_rows_sycl_reorder(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
                                  ggml_tensor *dst, const void *src0_dd,
                                  const int32_t *src1_dd, float *dst_dd,
                                  queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    GGML_ASSERT(ne00 % 2 == 0);

    const uint8_t* src0_q = (const uint8_t*)src0_dd;
    const size_t ncols = ne00;
    const size_t nrows = ne01;
    const sycl::half* src0_dq = (const sycl::half*)(src0_q + nrows * ncols / 2);
    stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
                         [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
                             k_get_rows_reorder<qk, qr, dq_reorder>(
                                 src0_dd, src0_dq, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
                         });

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}

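// Launcher for the float/half path. The fp16 capability check is issued
// unconditionally because this template may be instantiated with
// sycl::half source data.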
template <typename src0_t>
static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                const ggml_tensor *src1, ggml_tensor *dst,
                                const src0_t *src0_dd, const int32_t *src1_dd,
                                float *dst_dd, queue_ptr stream) {

    GGML_TENSOR_BINARY_OP_LOCALS

    const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
    const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE;
    const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);

    // strides in elements
    //const size_t s0 = nb0 / ggml_element_size(dst);
    const size_t s1 = nb1 / ggml_element_size(dst);
    const size_t s2 = nb2 / ggml_element_size(dst);
    const size_t s3 = nb3 / ggml_element_size(dst);

    const size_t s10 = nb10 / ggml_element_size(src1);
    const size_t s11 = nb11 / ggml_element_size(src1);
    const size_t s12 = nb12 / ggml_element_size(src1);
    //const size_t s13 = nb13 / ggml_element_size(src1);

    {
        dpct::has_capability_or_fail(stream->get_device(),
                                     {sycl::aspect::fp16});

        stream->parallel_for(
            sycl::nd_range<3>(block_nums * block_dims, block_dims),
            [=](sycl::nd_item<3> item_ct1) {
                k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
                                 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
            });
    }

    GGML_UNUSED(dst);
    GGML_UNUSED(ctx);
}

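// Entry point: dispatches on src0->type. Float and half tensors take the
// plain copy path; quantized types go through the dequantizing kernel, with
// Q4_0 additionally selecting the reorder path when the optimized layout is
// enabled and the result feeds a MUL_MAT. k-quants are not implemented yet
// and abort.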
void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                           const ggml_tensor *src1, ggml_tensor *dst,
                           const float *src0_d, const float *src1_d,
                           float *dst_d, const queue_ptr &stream) {

    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
    GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type));
    GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));

    const int32_t * src1_i32 = (const int32_t *) src1_d;

    switch (src0->type) {
        case GGML_TYPE_F16:
            get_rows_sycl_float(ctx, src0, src1, dst, (const sycl::half *)src0_d,
                                src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_F32:
            get_rows_sycl_float(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q4_0:
            if (ctx.opt_feature.reorder && dst->op == GGML_OP_MUL_MAT) {
                get_rows_sycl_reorder<QK4_0, QR4_0, dequantize_q4_0_reorder>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            } else {
                get_rows_sycl<QK4_0, QR4_0, dequantize_q4_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            }
            break;
        case GGML_TYPE_Q4_1:
            get_rows_sycl<QK4_1, QR4_1, dequantize_q4_1>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q5_0:
            get_rows_sycl<QK5_0, QR5_0, dequantize_q5_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q5_1:
            get_rows_sycl<QK5_1, QR5_1, dequantize_q5_1>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        case GGML_TYPE_Q8_0:
            get_rows_sycl<QK8_0, QR8_0, dequantize_q8_0>(ctx, src0, src1, dst, src0_d, src1_i32, dst_d, stream);
            break;
        default:
            // TODO: k-quants
            GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(src0->type));
            GGML_ABORT("fatal error");
            break;
    }
}