Commit 7dda9aa (parent: 4b5470f)

SYCL: remove the unused variables instead of commenting them out

File tree: 3 files changed, +9 -32 lines

  ggml/src/ggml-sycl/common.cpp
  ggml/src/ggml-sycl/ggml-sycl.cpp
  ggml/src/ggml-sycl/mmvq.cpp

ggml/src/ggml-sycl/common.cpp

Lines changed: 0 additions & 10 deletions

@@ -64,21 +64,11 @@ void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *sr
                           const ggml_tensor *src1, ggml_tensor *dst,
                           const ggml_sycl_op_flatten_t op) try {

-    // TODO: What's the use of these?
-    // const int64_t nrows0 = ggml_nrows(src0);
-    // const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;
-
     const bool use_src1 = src1 != nullptr;

     GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT);
     GGML_ASSERT( dst->backend != GGML_BACKEND_TYPE_GPU_SPLIT);

-    // TODO: What are these uses of these?
-
-    // ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
-    // ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
-    // ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
-
     // dd = data device
     float * src0_ddf = (float *) src0->data;
     float * src1_ddf = use_src1 ? (float *) src1->data : nullptr;
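For context, the code that survives this cleanup follows a simple pattern: each tensor's data pointer is treated as a flat float buffer ("dd" = data device) and handed straight to the op. A minimal sketch of that unpacking, with hypothetical stand-in types (tensor_t and binary_op_t are illustrative, not the real ggml_tensor / ggml_sycl_op_flatten_t):

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-ins for ggml_tensor and ggml_sycl_op_flatten_t.
    struct tensor_t { float *data; int n; };
    typedef void (*binary_op_t)(const float *a, const float *b, float *out, int n);

    // Mirrors the flatten pattern: unpack raw data pointers, then dispatch.
    static void op_flatten(const tensor_t *src0, const tensor_t *src1,
                           tensor_t *dst, binary_op_t op) {
        assert(src0 != nullptr && dst != nullptr);
        const bool use_src1 = src1 != nullptr;
        float *src0_ddf = src0->data;
        float *src1_ddf = use_src1 ? src1->data : nullptr;
        op(src0_ddf, src1_ddf, dst->data, dst->n);
    }

    static void add_op(const float *a, const float *b, float *out, int n) {
        for (int i = 0; i < n; ++i) out[i] = a[i] + (b ? b[i] : 0.0f);
    }

    int main() {
        float a[3] = {1, 2, 3}, b[3] = {4, 5, 6}, c[3] = {};
        tensor_t t0 = {a, 3}, t1 = {b, 3}, td = {c, 3};
        op_flatten(&t0, &t1, &td, add_op);
        printf("%g %g %g\n", c[0], c[1], c[2]); // 5 7 9
    }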

ggml/src/ggml-sycl/ggml-sycl.cpp

Lines changed: 3 additions & 6 deletions

@@ -2873,8 +2873,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten

     ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
     ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
-    // TODO: What's the use of this?
-    // ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+

     const bool src0_is_contiguous = ggml_is_contiguous(src0);
     const bool src1_is_contiguous = ggml_is_contiguous(src1);

@@ -3300,8 +3299,7 @@ static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx,
     GGML_ASSERT(src0->type == GGML_TYPE_F16);

     GGML_TENSOR_BINARY_OP_LOCALS
-    // TODO: What's the use of this?
-    //const int64_t ne_dst = ggml_nelements(dst);
+

     SYCL_CHECK(ggml_sycl_set_device(ctx.device));
     queue_ptr main_stream = ctx.stream();;

@@ -4234,8 +4232,7 @@ catch (sycl::exception const &exc)
 }

 static void ggml_backend_sycl_event_wait(ggml_backend_t backend, ggml_backend_event_t event) try {
-    // TODO: sycl_ctx is unused here
-    // ggml_backend_sycl_context* sycl_ctx = static_cast<ggml_backend_sycl_context*>(backend->context);
+
     sycl::event* sycl_event = static_cast<sycl::event*>(event->context);

     if (ggml_backend_is_sycl(backend)) {
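The event-wait function in the last hunk simply reinterprets the opaque event->context pointer as a sycl::event and waits on it; the commented-out context cast it deletes was never needed for that. A minimal standalone sketch of the same pattern (opaque_event is a hypothetical stand-in for ggml's backend event type, not the real one):

    #include <sycl/sycl.hpp>

    // Hypothetical stand-in for the opaque backend event wrapper.
    struct opaque_event { void *context; };

    // Same shape as ggml_backend_sycl_event_wait: unpack the pointer, wait.
    static void event_wait(opaque_event *event) {
        sycl::event *sycl_event = static_cast<sycl::event *>(event->context);
        sycl_event->wait();
    }

    int main() {
        sycl::queue q;
        sycl::event e = q.submit([](sycl::handler &cgh) {
            cgh.single_task([] { /* no-op kernel */ });
        });
        opaque_event ev{&e};
        event_wait(&ev); // blocks until the kernel finishes
    }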

ggml/src/ggml-sycl/mmvq.cpp

Lines changed: 6 additions & 16 deletions

@@ -754,9 +754,7 @@ static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy,
     const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE);
     {
         stream->submit([&](sycl::handler & cgh) {
-            //TODO: What's the purpose of these?
-            //auto iq2xs_grid_ptr_ct1 = &iq2xs_grid[0];
-            //auto ksigns64_ptr_ct1 = &ksigns64[0];
+

             cgh.parallel_for(
                 sycl::nd_range<3>(block_nums * block_dims, block_dims),

@@ -780,9 +778,7 @@ static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy,
     {

         stream->submit([&](sycl::handler &cgh) {
-            // TODO: What's the purpose of these?
-            // auto iq2xs_grid_ptr_ct1 = &iq2xs_grid[0];
-            // auto ksigns64_ptr_ct1 = &ksigns64[0];
+

             cgh.parallel_for(
                 sycl::nd_range<3>(block_nums * block_dims, block_dims),

@@ -806,9 +802,7 @@ static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy,
     {

         stream->submit([&](sycl::handler &cgh) {
-            // TODO: What's the purpose of these?
-            // auto iq3xxs_grid_ptr_ct1 = &iq3xxs_grid[0];
-            // auto ksigns64_ptr_ct1 = &ksigns64[0];
+

             cgh.parallel_for(
                 sycl::nd_range<3>(block_nums * block_dims, block_dims),

@@ -832,8 +826,7 @@ static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy,
     {

         stream->submit([&](sycl::handler &cgh) {
-            // TODO: What's the purpose of this?
-            // auto iq3s_grid_ptr_ct1 = &iq3s_grid[0];
+

             cgh.parallel_for(
                 sycl::nd_range<3>(block_nums * block_dims, block_dims),

@@ -857,9 +850,7 @@ static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy,
     {

         stream->submit([&](sycl::handler &cgh) {
-            // TODO: What's the purpose of these?
-            // auto iq1s_grid_ptr_ct1 = &iq1s_grid_gpu[0];
-            // auto ksigns64_ptr_ct1 = &ksigns64[0];
+

             cgh.parallel_for(
                 sycl::nd_range<3>(block_nums * block_dims, block_dims),

@@ -958,8 +949,7 @@ void ggml_sycl_op_mul_mat_vec_q(
     const size_t q8_1_bs = QK8_1;
     // the main device has a larger memory buffer to hold the results from all GPUs
     // nrows_dst == nrows of the matrix that the kernel writes into
-    // TODO: nrows_dst is unused. Please check.
-    // const int64_t nrows_dst = id == ctx.device ? ne00 : row_diff;
+
     for (int i = 0; i < src1_ncols; i++)
     {
         const size_t src1_ddq_i_offset = i * src1_padded_col_size * q8_1_ts / q8_1_bs;
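All of the mul_mat_vec kernels touched above share one launch shape: a 3-D nd_range whose global size is block_nums * block_dims, submitted through stream->submit and cgh.parallel_for. The deleted lines were leftover pointer captures that the kernel bodies no longer read. A minimal, runnable sketch of that launch pattern (the queue, sizes, and kernel body are illustrative, not ggml's):

    #include <sycl/sycl.hpp>
    #include <cstdio>

    int main() {
        sycl::queue stream;
        constexpr int nblocks = 4, block_size = 32;
        int *out = sycl::malloc_shared<int>(nblocks * block_size, stream);

        // Same launch shape as the mmvq kernels: global = block_nums * block_dims.
        const sycl::range<3> block_nums(1, 1, nblocks);
        const sycl::range<3> block_dims(1, 1, block_size);

        stream.submit([&](sycl::handler &cgh) {
            cgh.parallel_for(
                sycl::nd_range<3>(block_nums * block_dims, block_dims),
                [=](sycl::nd_item<3> item) {
                    // index along the fastest-moving (third) dimension
                    out[item.get_global_id(2)] =
                        static_cast<int>(item.get_local_id(2));
                });
        }).wait();

        printf("%d %d\n", out[0], out[nblocks * block_size - 1]); // 0 31
        sycl::free(out, stream);
    }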
