@@ -65,37 +65,3 @@ int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block
   }
   return sycl_down_blk_size;
 }
-
-void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                          const ggml_tensor *src1, ggml_tensor *dst,
-                          const ggml_sycl_op_flatten_t op) try {
-
-    const bool use_src1 = src1 != nullptr;
-    if (use_src1)
-        GGML_ASSERT(strcmp(src1->buffer->buft->iface.get_name(src1->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
-    GGML_ASSERT(strcmp(dst->buffer->buft->iface.get_name(dst->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
-
-    // dd = data device
-    float * src0_ddf = (float *) src0->data;
-    float * src1_ddf = use_src1 ? (float *) src1->data : nullptr;
-    float * dst_ddf  = (float *) dst->data;
-
-    ggml_sycl_pool_alloc<float> src0_f(ctx.pool());
-    ggml_sycl_pool_alloc<float> src1_f(ctx.pool());
-    ggml_sycl_pool_alloc<float> dst_f(ctx.pool());
-
-    ggml_sycl_set_device(ctx.device);
-    queue_ptr main_stream = ctx.stream();
-    // GGML_SYCL_DEBUG("ctx.device=%d, main_stream=%p src0_on_device=%d, src1_on_device=%d, dst_on_device=%d\n",
-    // ctx.device, main_stream, src0_on_device, src1_on_device, dst_on_device);
-
-    // do the computation
-    op(ctx, src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream);
-    // print_ggml_tensor("tensor", dst);
-}
-catch (sycl::exception const &exc) {
-
-    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
-              << ", line:" << __LINE__ << std::endl;
-    std::exit(1);
-}
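For reference, the removed `ggml_sycl_op_flatten` was a thin dispatch wrapper: it rejected split buffers, pulled raw `float` device pointers out of the tensors, selected the device and stream from the backend context, and forwarded everything to an op callback. Below is a minimal sketch of the old calling convention, inferred from the signature and the `op(...)` call visible in the diff above; the include path and the names `ggml_sycl_op_example` / `ggml_sycl_example` are assumptions for illustration, not part of this commit.

```cpp
#include "common.hpp"  // ggml-sycl backend common header (assumed include, as in the backend's own .cpp files)

// Hypothetical per-op launcher matching the callback shape used by the removed
// wrapper: it receives the flattened float pointers and the stream chosen there.
static void ggml_sycl_op_example(ggml_backend_sycl_context & ctx,
                                 const ggml_tensor * src0, const ggml_tensor * src1,
                                 ggml_tensor * dst, const float * src0_dd,
                                 const float * src1_dd, float * dst_dd,
                                 const queue_ptr & main_stream) {
    // ... enqueue the actual SYCL kernel on main_stream here ...
    (void) ctx; (void) src0; (void) src1; (void) dst;
    (void) src0_dd; (void) src1_dd; (void) dst_dd; (void) main_stream;
}

// An op entry point then reduced to a single call through the wrapper;
// after this commit, callers set up the device pointers and stream themselves.
static void ggml_sycl_example(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_example);
}
```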