@@ -345,6 +345,7 @@ struct ggml_backend_opencl_context {
     cl_command_queue queue;
 
     cl_program program_add;
+    cl_program program_add_id;
     cl_program program_clamp;
     cl_program program_cpy;
     cl_program program_cvt;
@@ -404,6 +405,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_mul, kernel_mul_row, kernel_mul_f16, kernel_mul_row_f16;
     cl_kernel kernel_div, kernel_div_row, kernel_div_f16, kernel_div_row_f16;
     cl_kernel kernel_sub, kernel_sub_row, kernel_sub_f16, kernel_sub_row_f16;
+    cl_kernel kernel_add_id;
     cl_kernel kernel_scale;
     cl_kernel kernel_silu, kernel_silu_4;
     cl_kernel kernel_gelu, kernel_gelu_4;
@@ -412,7 +414,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_relu;
     cl_kernel kernel_sigmoid_f32, kernel_sigmoid_f16;
     cl_kernel kernel_clamp;
-    cl_kernel kernel_geglu, kernel_reglu, kernel_swiglu, kernel_geglu_erf, kernel_geglu_quick,
+    cl_kernel kernel_geglu, kernel_reglu, kernel_swiglu, kernel_swiglu_oai, kernel_geglu_erf, kernel_geglu_quick,
               kernel_geglu_f16, kernel_reglu_f16, kernel_swiglu_f16, kernel_geglu_erf_f16, kernel_geglu_quick_f16;
     cl_kernel kernel_norm;
     cl_kernel kernel_rms_norm, kernel_rms_norm_mul;
@@ -681,6 +683,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // add_id
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "add_id.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("add_id.cl");
+#endif
+        backend_ctx->program_add_id =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_add_id = clCreateKernel(backend_ctx->program_add_id, "kernel_add_id", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // clamp
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
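Like the neighbouring program blocks, the embed branch works because the build turns each `.cl` file into a header that expands to a single string literal, so the `#include` can sit inside the braced initializer of `kernel_src`. A sketch of the assumed shape of the generated header (the real file comes from the `GGML_OPENCL_EMBED_KERNELS` tooling, not written by hand):

```c
// add_id.cl.h -- build-generated (sketch only):
R"(
kernel void kernel_add_id( /* ... full contents of add_id.cl embedded verbatim ... */ ) { /* ... */ }
)"
```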
@@ -787,6 +805,7 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         CL_CHECK((backend_ctx->kernel_geglu       = clCreateKernel(backend_ctx->program_glu, "kernel_geglu", &err), err));
         CL_CHECK((backend_ctx->kernel_reglu       = clCreateKernel(backend_ctx->program_glu, "kernel_reglu", &err), err));
         CL_CHECK((backend_ctx->kernel_swiglu      = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu", &err), err));
+        CL_CHECK((backend_ctx->kernel_swiglu_oai  = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu_oai", &err), err));
         CL_CHECK((backend_ctx->kernel_geglu_erf   = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_erf", &err), err));
         CL_CHECK((backend_ctx->kernel_geglu_quick = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_quick", &err), err));
         CL_CHECK((backend_ctx->kernel_geglu_f16   = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_f16", &err), err));
@@ -2467,6 +2486,8 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
             return (op->src[0]->type == op->src[1]->type) &&
                    (op->src[0]->type == op->type) &&
                    (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16);
+        case GGML_OP_ADD_ID:
+            return op->src[0]->type == GGML_TYPE_F32;
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(op)) {
                 case GGML_UNARY_OP_GELU:
@@ -2488,6 +2509,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
                 case GGML_GLU_OP_GEGLU:
                 case GGML_GLU_OP_REGLU:
                 case GGML_GLU_OP_SWIGLU:
+                case GGML_GLU_OP_SWIGLU_OAI:
                 case GGML_GLU_OP_GEGLU_ERF:
                 case GGML_GLU_OP_GEGLU_QUICK:
                     return ggml_is_contiguous_1(op->src[0]) && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
@@ -3824,6 +3846,75 @@ static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const
     }
 }
 
+static void ggml_cl_add_id(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(src1);
+    GGML_ASSERT(src1->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    const ggml_tensor * src2 = dst->src[2];
+    GGML_ASSERT(src2);
+    GGML_ASSERT(src2->extra);
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(src2->type == GGML_TYPE_I32);
+    GGML_ASSERT(dst->type  == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous_rows(src0));
+
+    const int ne00 = src0->ne[0];
+    const int ne01 = src0->ne[1];
+    const int ne02 = src0->ne[2];
+
+    const cl_ulong nb01 = src0->nb[1];
+    const cl_ulong nb02 = src0->nb[2];
+
+    const cl_ulong nb11 = src1->nb[1];
+
+    const cl_ulong nb21 = src2->nb[1];
+
+    const int ne0 = dst->ne[0];
+    const int ne1 = dst->ne[1];
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
+    ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offset1 = extra1->offset + src1->view_offs;
+    cl_ulong offset2 = extra2->offset + src2->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_kernel kernel = backend_ctx->kernel_add_id;
+
+    CL_CHECK(clSetKernelArg(kernel,  0, sizeof(cl_mem),   &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  1, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel,  2, sizeof(cl_mem),   &extra1->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  3, sizeof(cl_ulong), &offset1));
+    CL_CHECK(clSetKernelArg(kernel,  4, sizeof(cl_mem),   &extra2->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  5, sizeof(cl_ulong), &offset2));
+    CL_CHECK(clSetKernelArg(kernel,  6, sizeof(cl_mem),   &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  7, sizeof(cl_ulong), &offsetd));
+    CL_CHECK(clSetKernelArg(kernel,  8, sizeof(cl_ulong), &nb01));
+    CL_CHECK(clSetKernelArg(kernel,  9, sizeof(cl_ulong), &nb02));
+    CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb11));
+    CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb21));
+    CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int),      &ne0));
+    CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int),      &ne1));
+
+    int nth = MIN(ne00, (int) backend_ctx->get_kernel_workgroup_size(kernel));
+    size_t global_work_size[] = { (size_t)ne01*nth, (size_t)ne02, 1 };
+    size_t local_work_size[]  = { (size_t)nth, 1, 1 };
+
+    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+}
+
 static void ggml_cl_mul(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(src0);
     GGML_ASSERT(src0->extra);
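The device side of this launch lives in `add_id.cl`, which is not part of the diff. A minimal sketch of what `kernel_add_id` plausibly looks like, inferred only from the host-side argument list and the `{ne01*nth, ne02, 1}` NDRange above (the body and exact signature are assumptions, not the actual file). In ggml, ADD_ID computes `dst[:, i1, i2] = src0[:, i1, i2] + src1[:, src2[i1, i2]]`, i.e. each row gets a bias row of `src1` selected by an expert id in `src2`:

```c
// Sketch only: inferred from the clSetKernelArg calls in ggml_cl_add_id above.
kernel void kernel_add_id(
        global char * src0, ulong offset0,
        global char * src1, ulong offset1,
        global char * src2, ulong offset2,
        global char * dst,  ulong offsetd,
        ulong nb01, ulong nb02,   // src0 row / plane strides
        ulong nb11,               // src1 row stride
        ulong nb21,               // src2 row stride
        int ne0, int ne1) {
    const int i1 = get_group_id(0);   // row index,   0..ne01-1
    const int i2 = get_group_id(1);   // plane index, 0..ne02-1

    // expert id selecting which src1 row to add to this src0 row
    const int i11 = *(global const int *)(src2 + offset2 + i1*sizeof(int) + i2*nb21);

    global const float * s0 = (global const float *)(src0 + offset0 + i1*nb01 + i2*nb02);
    global const float * s1 = (global const float *)(src1 + offset1 + i11*nb11);
    global       float * d  = (global       float *)(dst  + offsetd
                                   + ((ulong)i2*ne1 + i1)*ne0*sizeof(float)); // dst assumed contiguous

    // one workgroup per row; threads stride across the ne0 elements
    for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) {
        d[i0] = s0[i0] + s1[i0];
    }
}
```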
@@ -7005,6 +7096,9 @@ static void ggml_cl_glu(ggml_backend_t backend, const ggml_tensor * src0, const
                 kernel = backend_ctx->kernel_swiglu_f16;
             }
             break;
+        case GGML_GLU_OP_SWIGLU_OAI:
+            kernel = backend_ctx->kernel_swiglu_oai;
+            break;
         case GGML_GLU_OP_GEGLU_ERF:
             if (dst->type == GGML_TYPE_F32) {
                 kernel = backend_ctx->kernel_geglu_erf;
@@ -7040,7 +7134,10 @@ static void ggml_cl_glu(ggml_backend_t backend, const ggml_tensor * src0, const
 
     const cl_ulong nb1 = dst->nb[1];
 
-    const int swp = ((const int32_t *) dst->op_params)[1];
+    const int   swp   = ggml_get_op_params_i32(dst, 1);
+    const float alpha = ggml_get_op_params_f32(dst, 2);
+    const float limit = ggml_get_op_params_f32(dst, 3);
+
     const int ne00_off = src1 ? 0 : (swp ? ne0 : 0);
     const int ne10_off = src1 ? 0 : (swp ? 0 : ne0);
 
@@ -7057,6 +7154,11 @@ static void ggml_cl_glu(ggml_backend_t backend, const ggml_tensor * src0, const
     CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne00_off));
     CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne10_off));
 
+    if (ggml_get_glu_op(dst) == GGML_GLU_OP_SWIGLU_OAI) {
+        CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float), &limit));
+        CL_CHECK(clSetKernelArg(kernel, 13, sizeof(float), &alpha));
+    }
+
     const size_t nrows = ggml_nrows(src0);
     size_t nth = 512;
     size_t global_work_size[] = {nrows*nth, 1, 1};
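The two extra arguments are consumed only by the OAI variant; for the other GLU kernels the signature ends at arg 11. The `kernel_swiglu_oai` source itself is not shown in this diff; a hedged per-element sketch of the gating it is expected to compute, assuming it matches ggml's reference swiglu_oai formula (x clamped from above by `limit`, the linear half clamped to `[-limit, limit]`):

```c
// Sketch of the assumed per-element swiglu_oai math (OpenCL C builtins):
//   out = x * sigmoid(alpha * x) * (g + 1)
float swiglu_oai(float x, float g, float alpha, float limit) {
    x = fmin(x, limit);                                // clamp gate input from above
    g = clamp(g, -limit, limit);                       // clamp linear input both ways
    const float s = 1.0f / (1.0f + exp(-alpha * x));   // sigmoid(alpha * x)
    return (x * s) * (g + 1.0f);
}
```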
@@ -7113,6 +7215,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor
             }
             func = ggml_cl_add;
             break;
+        case GGML_OP_ADD_ID:
+            if (!any_on_device) {
+                return false;
+            }
+            func = ggml_cl_add_id;
+            break;
         case GGML_OP_MUL:
             if (!any_on_device) {
                 return false;