Skip to content

Commit 0cb2933

Browse files
committed
SYCL: remove ggml_sycl_op_flatten function
1 parent 553f1e4 commit 0cb2933

File tree

12 files changed

+465
-688
lines changed

12 files changed

+465
-688
lines changed

ggml/src/ggml-sycl/common.cpp

Lines changed: 0 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -65,37 +65,3 @@ int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block
6565
}
6666
return sycl_down_blk_size;
6767
}
68-
69-
void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
70-
const ggml_tensor *src1, ggml_tensor *dst,
71-
const ggml_sycl_op_flatten_t op) try {
72-
73-
const bool use_src1 = src1 != nullptr;
74-
if(use_src1)
75-
GGML_ASSERT(strcmp(src1->buffer->buft->iface.get_name(src1->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
76-
GGML_ASSERT(strcmp(dst->buffer->buft->iface.get_name(dst->buffer->buft), GGML_SYCL_NAME "_Split") != 0);
77-
78-
// dd = data device
79-
float * src0_ddf = (float *) src0->data;
80-
float * src1_ddf = use_src1 ? (float *) src1->data : nullptr;
81-
float * dst_ddf = (float *) dst->data;
82-
83-
ggml_sycl_pool_alloc<float> src0_f(ctx.pool());
84-
ggml_sycl_pool_alloc<float> src1_f(ctx.pool());
85-
ggml_sycl_pool_alloc<float> dst_f(ctx.pool());
86-
87-
ggml_sycl_set_device(ctx.device);
88-
queue_ptr main_stream = ctx.stream();
89-
// GGML_SYCL_DEBUG("ctx.device=%d, main_stream=%p src0_on_device=%d, src1_on_device=%d, dst_on_device=%d\n",
90-
// ctx.device, main_stream, src0_on_device, src1_on_device, dst_on_device);
91-
92-
// do the computation
93-
op(ctx, src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream);
94-
// print_ggml_tensor("tensor", dst);
95-
}
96-
catch (sycl::exception const &exc) {
97-
98-
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
99-
<< ", line:" << __LINE__ << std::endl;
100-
std::exit(1);
101-
}

ggml/src/ggml-sycl/common.hpp

Lines changed: 12 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -677,8 +677,17 @@ inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_t
677677

678678
bool gpu_has_xmx(sycl::device &dev);
679679

680-
void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
681-
const ggml_tensor *src1, ggml_tensor *dst,
682-
const ggml_sycl_op_flatten_t op);
680+
// Some backend specific macros
681+
#define GGML_SYCL_TENSOR_BINARY_OP_LOCALS \
682+
GGML_TENSOR_LOCALS(int64_t, ne0, dst->src[0], ne) \
683+
GGML_TENSOR_LOCALS(size_t, nb0, dst->src[0], nb) GGML_TENSOR_LOCALS(int64_t, ne1, dst->src[1], ne) \
684+
GGML_TENSOR_LOCALS(size_t, nb1, dst->src[1], nb) GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
685+
GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
686+
687+
#define GGML_SYCL_TENSOR_BINARY_OP_CP_LOCALS \
688+
GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
689+
GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
690+
GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
691+
683692

684693
#endif // GGML_SYCL_COMMON_HPP

0 commit comments

Comments (0)