Skip to content

Commit a302cc2

Browse files
committed
opencl: add flattened q8_0 mv_id
1 parent 124dd54 commit a302cc2

File tree

3 files changed

+203
-4
lines changed

ggml/src/ggml-opencl/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@ set(GGML_OPENCL_KERNELS
8787
mul_mv_mxfp4_f32
8888
mul_mv_id_q4_0_f32_8x_flat
8989
mul_mv_id_q8_0_f32
90+
mul_mv_id_q8_0_f32_flat
9091
mul_mv_id_mxfp4_f32
9192
mul_mm_f32_f32_l4_lm
9293
mul_mm_f16_f32_l4_lm

ggml/src/ggml-opencl/ggml-opencl.cpp

Lines changed: 57 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -402,7 +402,7 @@ struct ggml_backend_opencl_context {
402402
cl_program program_conv_2d_f16_f32;
403403
cl_program program_tsembd;
404404
cl_program program_mul_mv_id_q4_0_f32_8x_flat;
405-
cl_program program_mul_mv_id_q8_0_f32;
405+
cl_program program_mul_mv_id_q8_0_f32, program_mul_mv_id_q8_0_f32_flat;
406406
cl_program program_mul_mv_id_mxfp4_f32;
407407
cl_program program_mul_mm_f32_f32_l4_lm;
408408
cl_program program_mul_mm_f16_f32_l4_lm;
@@ -472,7 +472,7 @@ struct ggml_backend_opencl_context {
472472
cl_kernel kernel_conv_2d_f16_f32;
473473
cl_kernel kernel_timestep_embedding;
474474
cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat;
475-
cl_kernel kernel_mul_mv_id_q8_0_f32;
475+
cl_kernel kernel_mul_mv_id_q8_0_f32, kernel_mul_mv_id_q8_0_f32_flat;
476476
cl_kernel kernel_mul_mv_id_mxfp4_f32;
477477
cl_kernel kernel_mul_mm_f32_f32_l4_lm;
478478
cl_kernel kernel_mul_mm_f16_f32_l4_lm;
@@ -1766,6 +1766,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
17661766
GGML_LOG_CONT(".");
17671767
}
17681768

1769+
// mul_mv_id_q8_0_f32_flat
1770+
{
1771+
#ifdef GGML_OPENCL_EMBED_KERNELS
1772+
const std::string kernel_src {
1773+
#include "mul_mv_id_q8_0_f32_flat.cl.h"
1774+
};
1775+
#else
1776+
const std::string kernel_src = read_file("mul_mv_id_q8_0_f32_flat.cl");
1777+
#endif
1778+
backend_ctx->program_mul_mv_id_q8_0_f32_flat =
1779+
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
1780+
1781+
CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32_flat, "kernel_mul_mv_id_q8_0_f32_flat", &err), err));
1782+
GGML_LOG_CONT(".");
1783+
}
1784+
17691785
// mul_mv_id_mxfp4_f32
17701786
{
17711787
#ifdef GGML_OPENCL_EMBED_KERNELS
@@ -7133,6 +7149,7 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
71337149

71347150
#ifdef GGML_OPENCL_SOA_Q
71357151
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
7152+
ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
71367153
#endif
71377154

71387155
const int ne00 = src0->ne[0];
@@ -7221,6 +7238,43 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
72217238
break;
72227239
}
72237240
case GGML_TYPE_Q8_0: {
7241+
#ifdef GGML_OPENCL_SOA_Q
7242+
kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32_flat;
7243+
7244+
if (backend_ctx->gpu_family == INTEL) {
7245+
sgs = 16;
7246+
nsg = 2;
7247+
ndst = 4;
7248+
} else if (backend_ctx->gpu_family == ADRENO) {
7249+
sgs = 64;
7250+
nsg = 2;
7251+
ndst = 4;
7252+
} else {
7253+
GGML_ASSERT(false && "TODO: Unknown GPU");
7254+
}
7255+
7256+
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q));
7257+
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d));
7258+
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
7259+
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
7260+
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
7261+
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
7262+
CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
7263+
CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
7264+
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
7265+
CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01));
7266+
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01));
7267+
CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02));
7268+
CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
7269+
CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
7270+
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
7271+
CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
7272+
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20));
7273+
CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21));
7274+
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
7275+
CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
7276+
CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
7277+
#else
72247278
kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32;
72257279

72267280
if (backend_ctx->gpu_family == INTEL) {
@@ -7230,7 +7284,6 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
72307284
} else if (backend_ctx->gpu_family == ADRENO) {
72317285
sgs = 64;
72327286
nsg = 2;
7233-
ndst = 8;
72347287
ndst = 4;
72357288
} else {
72367289
GGML_ASSERT(false && "TODO: Unknown GPU");
@@ -7257,7 +7310,7 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
72577310
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
72587311
CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
72597312
CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
7260-
7313+
#endif // GGML_OPENCL_SOA_Q
72617314
break;
72627315
}
72637316
case GGML_TYPE_MXFP4: {
ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl

Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,145 @@
1+
// Preamble for the flattened q8_0 indirect mat-vec kernel: required OpenCL
// extensions, the q8_0 block layout, and per-vendor subgroup tuning constants.

#pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Subgroup support: prefer the Intel extension when available, otherwise the
// Khronos standard one.
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif

// Fixed subgroup sizes are requested via vendor-specific attributes; the same
// macro names are used below so the kernel body stays vendor-agnostic.
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64  __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif

// q8_0 quantization block: one fp16 scale + 32 signed 8-bit quants.
// sizeof(block_q8_0) == 2 + 32 == 34; the kernel relies on this literal when
// converting byte offsets into block indices.
#define QK8_0 32
typedef struct {
    half d;          // delta
    char qs[QK8_0];  // quants
} block_q8_0;

// Number of quants each thread processes per inner-loop iteration.
#define NB_Q8_0 8

// Per-vendor launch tuning. These must stay in sync with the host-side values
// (sgs/nsg/ndst) chosen in ggml_cl_mul_mat_id for this kernel.
#ifdef INTEL_GPU
#define N_R0_Q8_0 4 // number of rows each subgroup works on
#define N_SG_Q8_0 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_Q8_0 4
#define N_SG_Q8_0 2
#define N_SIMDWIDTH 64
#endif
38+
39+
//------------------------------------------------------------------------------
// kernel_mul_mv_id_q8_0_f32_flat
//
// Indirect (expert-routed) mat-vec for q8_0 weights in "flat" layout: the
// quants (src0_q) and the per-block fp16 deltas (src0_d) are stored in two
// separate buffers rather than interleaved block_q8_0 structs. The expert
// (matrix) index used for each output row is read as an int from src2.
// Each subgroup accumulates N_R0_Q8_0 output rows; lane 0 writes the reduced
// sums to dst as f32.
//------------------------------------------------------------------------------
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_id_q8_0_f32_flat(
        global char * src0_q,   // flattened q8_0 quants, QK8_0 bytes per block
        global half * src0_d,   // flattened q8_0 deltas, one half per block
        global char * src1,     // activations (read as float below)
        ulong offset1,          // byte offset into src1
        global char * src2,     // expert ids (read as int)
        ulong offset2,          // byte offset into src2
        global char * dst,      // output (written as float)
        ulong offsetd,          // byte offset into dst
        int ne00,               // src0 row length in elements
        int ne01,               // src0 rows per expert matrix
        ulong nb01,             // src0 stride (bytes) between rows
        ulong nb02,             // src0 stride (bytes) between expert matrices
        int ne11,
        int ne12,
        ulong nb11,             // src1 stride (bytes) between rows
        ulong nb12,
        int ne20,               // number of selected experts per token
        int ne21,
        ulong nb21,             // src2 stride (bytes) between tokens
        int ne0,
        int ne1
) {
    // Apply sub-buffer offsets once up front.
    src1 = (global char *)((global char *)src1 + offset1);
    src2 = (global char *)((global char *)src2 + offset2);
    dst = (global char *)((global char *)dst + offsetd);

    // Work-group id in dim 2 enumerates (token, expert-slot) pairs:
    // iid1 selects the token, idx the expert slot within that token.
    int iid1 = get_group_id(2)/ne20;
    int idx = get_group_id(2)%ne20;

    // Expert (matrix) index chosen for this slot.
    int i02 = ((global int *) (src2 + iid1*nb21))[idx];

    int i11_ = idx % ne11;
    int i12_ = iid1;

    int i1 = idx;
    int i2 = i12_;

    // Convert the byte offset of the selected expert matrix into a q8_0 block
    // index; the same block index addresses both the quant and delta buffers.
    // 34 == sizeof(block_q8_0)
    ulong src0_off = i02*nb02/34;

    global char * src0_q_cur = src0_q + src0_off*sizeof(char)*QK8_0;
    global half * src0_d_cur = src0_d + src0_off;
    global char * src1_cur = src1 + i11_*nb11 + i12_*nb12;

    global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float);

    // Number of q8_0 blocks per row.
    int nb = ne00/QK8_0;

    int r0 = get_group_id(0);
    int r1 = get_group_id(1);

    // First of the N_R0_Q8_0 output rows this subgroup accumulates.
    int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0;

    ulong offset_src1 = r1*nb11;
    global float * y = (global float *) (src1_cur + offset_src1);

    // pointers to src0 rows
    global char * ax[N_R0_Q8_0];
    global half * ad[N_R0_Q8_0];
    for (int row = 0; row < N_R0_Q8_0; ++row) {
        // Row start expressed as a block index (nb01 is a byte stride,
        // 34 == sizeof(block_q8_0)); split into quant and delta pointers.
        ulong offset_src0 = (first_row + row)*nb01/34;
        ax[row] = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0);
        ad[row] = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half));
    }

    float yl[NB_Q8_0];                    // cached activation values for this thread
    float sumf[N_R0_Q8_0] = { 0.f };      // per-row partial dot products

    // Lanes are grouped 4-per-block: ix picks the starting block, il the
    // NB_Q8_0-quant slice inside it (4 * NB_Q8_0 == QK8_0).
    const short ix = get_sub_group_local_id()/4;
    const short il = get_sub_group_local_id()%4;

    global float * yb = y + ix*QK8_0 + il*NB_Q8_0;

    // each thread handles NB_Q8_0 quants at a time;
    // the subgroup as a whole advances N_SIMDWIDTH/4 blocks per iteration.
    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) {
        for (short i = 0; i < NB_Q8_0; ++i) {
            yl[i] = yb[i];
        }

        for (short row = 0; row < N_R0_Q8_0; row++) {
            global char * qs = ax[row] + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
            float sumq = 0.f;
            for (short iq = 0; iq < NB_Q8_0; ++iq) {
                sumq += qs[iq] * yl[iq];
            }
            // Scale the integer dot product by this block's delta.
            sumf[row] += sumq*ad[row][ib];
        }

        yb += N_SIMDWIDTH*NB_Q8_0;
    }

    global float * dst_f32 = (global float *) dst_cur + (ulong)r1*ne0;

    // Reduce partial sums across the subgroup; lane 0 writes the result,
    // guarding against rows past the end of the matrix.
    for (int row = 0; row < N_R0_Q8_0; ++row) {
        float tot = sub_group_reduce_add(sumf[row]);

        if (get_sub_group_local_id() == 0 && first_row + row < ne01) {
            dst_f32[first_row + row] = tot;
        }
    }
}

0 commit comments

Comments
 (0)