
using namespace sycl;

// Copy src into dst with each axis rotated by the given shift (wrap-around).
static void kernel_roll_multi_axis(queue &q, const ggml_tensor *src, ggml_tensor *dst,
                                   int shift0, int shift1, int shift2, int shift3) {
    if (!src || !dst) throw std::runtime_error("null tensor");
    if (src->type != GGML_TYPE_F32 || dst->type != GGML_TYPE_F32)
        throw std::runtime_error("only F32 supported in SYCL roll");

    const int64_t ne0 = dst->ne[0];
    const int64_t ne1 = dst->ne[1];
    const int64_t ne2 = dst->ne[2];
    // ... (lines 15-24 elided in the diff view)

    const float *src_d = (const float *) src->data;
    float *dst_d = (float *) dst->data;

    if (!src_d || !dst_d) throw std::runtime_error("null data pointers");

    q.submit([&](handler &h) {
        // ... (lines 32-43 elided in the diff view)
            const int64_t src_i2 = (i2 - sh2 + ne2) % ne2;
            const int64_t src_i3 = (i3 - sh3 + ne3) % ne3;

            // Contiguous F32 layout: linearize the source index with ne-based strides.
            const int64_t idx_src = src_i0 + src_i1 * ne0 +
                                    src_i2 * ne0 * ne1 + src_i3 * ne0 * ne1 * ne2;

            dst_d[idx_dst] = src_d[idx_src];
// ... (lines 51-54 elided in the diff view)

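// Entry point for GGML_OP_ROLL on the SYCL backend: unpacks the per-axis
// shifts from the op's int32 parameter block and dispatches the kernel above.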
void ggml_sycl_roll(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    const ggml_tensor *src = dst->src[0];

    const int32_t *params = (const int32_t *) dst->op_params;
    const int shift0 = params[0];
    const int shift1 = params[1];
    const int shift2 = params[2];
    const int shift3 = params[3];
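
// ---------------------------------------------------------------------------
// Standalone reference sketch (not part of the file above): the same
// wrap-around index rule on a plain host array, to illustrate what the SYCL
// kernel computes per element. A minimal sketch assuming the shift may be
// arbitrary (possibly negative), so it is normalized into [0, ne) first, as
// the kernel's `(i - sh + ne) % ne` expression requires. The helper name
// roll_1d_reference is hypothetical, not part of ggml.
// ---------------------------------------------------------------------------
#include <cstdio>

static void roll_1d_reference(const float *src, float *dst, int ne, int shift) {
    const int sh = ((shift % ne) + ne) % ne;  // normalize shift into [0, ne)
    for (int i = 0; i < ne; ++i) {
        // dst[i] takes the element sitting sh positions behind i, wrapped.
        dst[i] = src[(i - sh + ne) % ne];
    }
}

int main() {
    const float src[5] = {0, 1, 2, 3, 4};
    float dst[5];
    roll_1d_reference(src, dst, 5, 2);
    for (float v : dst) printf("%g ", v);  // prints: 3 4 0 1 2
    printf("\n");
    return 0;
}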