@@ -387,6 +387,7 @@ struct ggml_backend_opencl_context {
     cl_program program_tanh;
     cl_program program_upscale;
     cl_program program_concat;
+    cl_program program_conv_2d;
     cl_program program_tsembd;
     cl_program program_mul_mv_id_q4_0_f32_8x_flat;
 
@@ -433,6 +434,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_upscale_bilinear;
     cl_kernel kernel_concat_f32_contiguous;
     cl_kernel kernel_concat_f32_non_contiguous;
+    cl_kernel kernel_conv_2d;
     cl_kernel kernel_timestep_embedding;
     cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat;
 
@@ -1400,6 +1402,27 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         }
     }
 
+    // conv2d
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "conv2d.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("conv2d.cl");
+#endif
+        if (!kernel_src.empty()) {
+            backend_ctx->program_conv_2d =
+                build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+            CL_CHECK((backend_ctx->kernel_conv_2d = clCreateKernel(backend_ctx->program_conv_2d, "kernel_conv_2d", &err), err));
+            GGML_LOG_CONT(".");
+        } else {
+            GGML_LOG_WARN("ggml_opencl: conv2d kernel source not found or empty. This op will not be available.\n");
+            backend_ctx->program_conv_2d = nullptr;
+            backend_ctx->kernel_conv_2d = nullptr;
+        }
+    }
+
     // mul_mv_id_q4_0_f32_8x_flat
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
@@ -2255,6 +2278,8 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
                    op->src[0]->ne[3] == 1 && op->ne[3] == 1;
         case GGML_OP_UPSCALE:
             return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32;
+        case GGML_OP_CONV_2D:
+            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32;
         case GGML_OP_CONCAT:
             return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32;
         case GGML_OP_TIMESTEP_EMBEDDING:
@@ -4685,6 +4710,73 @@ static void ggml_cl_timestep_embedding(ggml_backend_t backend, const ggml_tensor
     backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst);
 }
 
+static void ggml_cl_conv_2d(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_TENSOR_BINARY_OP_LOCALS;
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offset1 = extra1->offset + src1->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
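+    // ggml's conv2d layout: src0 is the kernel [KW, KH, Cin, Cout], src1 is the input [W, H, Cin, N],
+    // and dst is [OW, OH, Cout, N]; the mapping below just names the ne* dims accordingly.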
+    const cl_uint Cout = ne03; const cl_uint Cin = ne02; const cl_uint N = ne13;
+    const cl_uint KW = ne00; const cl_uint KH = ne01; const cl_uint W = ne10; const cl_uint H = ne11; const cl_uint OW = ne0; const cl_uint OH = ne1;
+
+    const cl_uint s0 = dst->op_params[0]; const cl_uint s1 = dst->op_params[1];
+    const cl_uint p0 = dst->op_params[2]; const cl_uint p1 = dst->op_params[3];
+    const cl_uint d0 = dst->op_params[4]; const cl_uint d1 = dst->op_params[5];
+
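+    // nb* are byte strides; dividing by the innermost stride converts them to element strides for the kernel.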
+    const cl_uint cl_nb01 = nb01/nb00; const cl_uint cl_nb02 = nb02/nb00; const cl_uint cl_nb03 = nb03/nb00;
+    const cl_uint cl_nb11 = nb11/nb10; const cl_uint cl_nb12 = nb12/nb10; const cl_uint cl_nb13 = nb13/nb10;
+    const cl_uint cl_nb1 = nb1/nb0; const cl_uint cl_nb2 = nb2/nb0; const cl_uint cl_nb3 = nb3/nb0;
+
+    const int64_t NPQ = (int64_t)N * OW * OH;
+
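+    // Tile sizes for the implicit-GEMM formulation in conv2d.cl: K spans Cout and NPQ spans the
+    // N*OH*OW output pixels; CRS presumably spans the Cin*KH*KW reduction dimension.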
+    const uint32_t WG_SIZE = 128;
+    const uint32_t BS_K = 128;
+    const uint32_t BS_CRS = 16;
+    const uint32_t BS_NPQ = 64;
+    const uint32_t VEC_SIZE = 4;
+
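+    // Ceiling division: how many blocks of block_size are needed to cover work_size.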
+    auto splitWork = [](uint32_t work_size, uint32_t block_size) { return (block_size + work_size - 1) / block_size; };
+    const uint32_t NB_K = splitWork(Cout, BS_K);
+    const uint32_t NB_NPQ = splitWork(NPQ, BS_NPQ);
+
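+    // Local memory for one fp16 weight tile (BS_K x BS_CRS) plus one vectorized input tile;
+    // the +1 columns look like padding to sidestep local-memory bank conflicts.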
+    const size_t shmem_size = (size_t)(BS_K * (BS_CRS + 1) * sizeof(cl_half) + BS_CRS * (BS_NPQ / VEC_SIZE + 1) * sizeof(cl_half4));
+
+    cl_kernel kernel = backend_ctx->kernel_conv_2d;
+    cl_uint idx = 0;
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extra1->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offset1));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_mem), &extrad->data_device)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_ulong), &offsetd));
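+    // Passing a size with a NULL pointer makes OpenCL allocate that many bytes of local memory for the arg.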
+    CL_CHECK(clSetKernelArg(kernel, idx++, shmem_size, NULL));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &Cout)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &Cin)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &N));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &KW)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &KH)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &W)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &H));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &OW)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &OH));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &s0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &s1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &p0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &p1));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &d0)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &d1));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb01)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb02)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb03));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb11)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb12)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb13));
+    CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb1)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb2)); CL_CHECK(clSetKernelArg(kernel, idx++, sizeof(cl_uint), &cl_nb3));
+
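+    // One WG_SIZE-thread workgroup per output tile: NB_K groups along dim 0, NB_NPQ along dim 1.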
+    size_t global_work_size[] = { (size_t)NB_K * WG_SIZE, (size_t)NB_NPQ, 1 };
+    size_t local_work_size[] = { (size_t)WG_SIZE, 1, 1 };
+
+#ifdef GGML_OPENCL_PROFILING
+    cl_event evt;
+    CL_CHECK(clEnqueueNDRangeKernel(backend_ctx->queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+    backend_ctx->profiling_info.emplace_back();
+    populateProfilingInfo(backend_ctx->profiling_info.back(), evt, kernel, 3, global_work_size, local_work_size, dst);
+#else
+    GGML_UNUSED(dst);
+    CL_CHECK(clEnqueueNDRangeKernel(backend_ctx->queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+#endif
+}
+
 static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(src0);
     GGML_ASSERT(src0->extra);
@@ -6286,6 +6378,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor
             }
             ggml_cl_upscale(backend, tensor->src[0], tensor);
             return true;
+        case GGML_OP_CONV_2D:
+            if (!any_on_device) {
+                return false;
+            }
+            func = ggml_cl_conv_2d;
+            break;
         case GGML_OP_CONCAT:
             if (!any_on_device) {
                 return false;