Commit ab37137

refactor more t->data to tensor_data(t) etc
1 parent b822399 commit ab37137


43 files changed: +659 −659 lines
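
The commit changes no behavior: every direct read of a tensor's data field is routed through a tensor_data() accessor, applied mechanically at each call site below. The accessor's definition is not part of this page; what follows is a minimal sketch of the idea, assuming it is a plain inline wrapper over the existing data field (the actual definition and the setter are assumptions, not shown in this commit):

    #include "ggml.h"

    // Minimal sketch (assumption, not the commit's actual definition):
    // route every access to a tensor's backing buffer through one helper,
    // so later changes to how `data` is stored touch a single function
    // instead of every `t->data` use site.
    static inline void * tensor_data(const struct ggml_tensor * t) {
        return t->data;
    }

    static inline void tensor_set_data(struct ggml_tensor * t, void * data) {
        t->data = data;
    }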

ggml/src/ggml-cpu/ops.cpp

Lines changed: 446 additions & 446 deletions
Large diffs are not rendered by default.

ggml/src/ggml-cpu/repack.cpp

Lines changed: 5 additions & 5 deletions
@@ -1239,12 +1239,12 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PAR

     int64_t i11_processed = 0;
     for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
-        ggml_quantize_mat_t<INTER_SIZE, PARAM_TYPE>((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10);
+        ggml_quantize_mat_t<INTER_SIZE, PARAM_TYPE>((float *) ((char *) tensor_data(src1) + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10);
     }

     i11_processed = ne11 - ne11 % 4;
     for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
-        from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10);
+        from_float((float *) ((char *) tensor_data(src1) + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10);
     }

     ggml_barrier(params->threadpool);
@@ -1332,7 +1332,7 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PAR
     // src1: float32 => param type
     for (int64_t i12 = 0; i12 < ne12; ++i12) {
         for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
-            from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11),
+            from_float((float *)((char *) tensor_data(src1) + i12 * nb12 + i11 * nb11),
                        (void *) (wdata + i12 * nbw2 + i11 * nbw1),
                        ne10);
         }
@@ -1348,7 +1348,7 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PAR
     for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
         for (int32_t id = 0; id < n_ids; ++id) {
             const int32_t i02 =
-                *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]);
+                *(const int32_t *) ((const char *) tensor_data(ids) + iid1 * ids->nb[1] + id * ids->nb[0]);

             GGML_ASSERT(i02 >= 0 && i02 < n_as);

@@ -1368,7 +1368,7 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PAR
                 continue;
             }

-            const auto * src0_cur = (const char *) src0->data + cur_a*nb02;
+            const auto * src0_cur = (const char *) tensor_data(src0) + cur_a*nb02;

             //const int64_t nr0 = ne01; // src0 rows
             const int64_t nr1 = cne1; // src1 rows
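
A pattern that recurs throughout these hunks: ggml strides (nb) are byte counts, so the code casts the base pointer to char * before adding offsets, and only then casts to the element type. A small illustration of the idiom with a hypothetical helper (tensor_row_f32 is not in the commit; it assumes the tensor_data() accessor is visible via ggml.h):

    #include "ggml.h"

    // Hypothetical helper illustrating the addressing idiom above:
    // row i11 of an f32 matrix starts i11 * nb[1] bytes past the base,
    // because nb[] strides are expressed in bytes, not elements.
    static inline float * tensor_row_f32(struct ggml_tensor * t, int64_t i11) {
        return (float *) ((char *) tensor_data(t) + i11 * t->nb[1]);
    }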

ggml/src/ggml-cpu/unary-ops.cpp

Lines changed: 2 additions & 2 deletions
@@ -92,8 +92,8 @@ static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst
         const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
         const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

-        dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
-        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
+        dst_t * dst_ptr = (dst_t *) ((char *) tensor_data(dst) + i03*nb3 + i02*nb2 + i01*nb1 );
+        const src0_t * src0_ptr = (const src0_t *) ((const char *) tensor_data(src0) + i03*nb03 + i02*nb02 + i01*nb01);

         vec_unary_op<op>(ne0, dst_ptr, src0_ptr);
     }
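
For context, the loop around this hunk flattens the three outer dimensions into one row index ir and recovers (i03, i02, i01) with the arithmetic shown above. A self-contained illustration with hypothetical extents (the values are examples, not from the commit):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // ir = i03*ne02*ne01 + i02*ne01 + i01, as decomposed in apply_unary_op
        const int64_t ne01 = 4, ne02 = 3;   // example extents (hypothetical)
        const int64_t ir   = 23;            // flat row index
        const int64_t i03  = ir / (ne02*ne01);
        const int64_t i02  = (ir - i03*ne02*ne01) / ne01;
        const int64_t i01  =  ir - i03*ne02*ne01 - i02*ne01;
        printf("i03=%lld i02=%lld i01=%lld\n",
               (long long) i03, (long long) i02, (long long) i01); // 1 2 3
        return 0;
    }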

ggml/src/ggml-cuda/acc.cu

Lines changed: 3 additions & 3 deletions
@@ -38,9 +38,9 @@ void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];

-    const float * src0_d = (const float *) src0->data;
-    const float * src1_d = (const float *) src1->data;
-    float * dst_d = (float *) dst->data;
+    const float * src0_d = (const float *) tensor_data(src0);
+    const float * src1_d = (const float *) tensor_data(src1);
+    float * dst_d = (float *) tensor_data(dst);

     cudaStream_t stream = ctx.stream();

ggml/src/ggml-cuda/arange.cu

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ static void arange_f32_cuda(float * dst, const int ne0, const float start, const
 }

 void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    float * dst_d = (float *)dst->data;
+    float * dst_d = (float *)tensor_data(dst);
     cudaStream_t stream = ctx.stream();

     GGML_ASSERT(dst->type == GGML_TYPE_F32);

ggml/src/ggml-cuda/argmax.cu

Lines changed: 2 additions & 2 deletions
@@ -77,8 +77,8 @@ void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const int64_t ne00 = src0->ne[0];
     const int64_t nrows = ggml_nrows(src0);

-    const float * src0_d = (const float *) src0->data;
-    int32_t * dst_d = (int32_t *) dst->data;
+    const float * src0_d = (const float *) tensor_data(src0);
+    int32_t * dst_d = (int32_t *) tensor_data(dst);

     cudaStream_t stream = ctx.stream();

ggml/src/ggml-cuda/argsort.cu

Lines changed: 2 additions & 2 deletions
@@ -87,8 +87,8 @@ static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, co

 void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
-    const float * src0_d = (const float *)src0->data;
-    float * dst_d = (float *)dst->data;
+    const float * src0_d = (const float *)tensor_data(src0);
+    float * dst_d = (float *)tensor_data(dst);
     cudaStream_t stream = ctx.stream();

     GGML_ASSERT(src0->type == GGML_TYPE_F32);

ggml/src/ggml-cuda/binbcast.cu

Lines changed: 7 additions & 7 deletions
@@ -312,23 +312,23 @@ static void ggml_cuda_op_bin_bcast(
 }

 void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, dst->src[0], dst, nullptr, tensor_data(dst->src[0]), tensor_data(dst), ctx.stream());
 }

 void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_add>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_add>>(dst->src[0], dst->src[1], dst, tensor_data(dst->src[0]), tensor_data(dst->src[1]), tensor_data(dst), ctx.stream());
 }

 void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_sub>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_sub>>(dst->src[0], dst->src[1], dst, tensor_data(dst->src[0]), tensor_data(dst->src[1]), tensor_data(dst), ctx.stream());
 }

 void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_mul>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_mul>>(dst->src[0], dst->src[1], dst, tensor_data(dst->src[0]), tensor_data(dst->src[1]), tensor_data(dst), ctx.stream());
 }

 void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_div>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
+    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_div>>(dst->src[0], dst->src[1], dst, tensor_data(dst->src[0]), tensor_data(dst->src[1]), tensor_data(dst), ctx.stream());
 }

 void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -352,8 +352,8 @@ void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst

     switch (dst->type) {
         case GGML_TYPE_F32: {
-            const float * src0_d = (const float *) src0->data;
-            float * dst_d = (float *) dst->data;
+            const float * src0_d = (const float *) tensor_data(src0);
+            float * dst_d = (float *) tensor_data(dst);
             repeat_back_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s00, s01, s02, s03, ne0, ne1, ne2, ne3, stream);
         } break;
         default: {

ggml/src/ggml-cuda/clamp.cu

Lines changed: 2 additions & 2 deletions
@@ -24,8 +24,8 @@ static void clamp_cuda(const T * x, T * dst, const T min, const T max, const int

 void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
-    const void * src0_d = src0->data;
-    void * dst_d = dst->data;
+    const void * src0_d = tensor_data(src0);
+    void * dst_d = tensor_data(dst);
     cudaStream_t stream = ctx.stream();

     GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);

ggml/src/ggml-cuda/concat.cu

Lines changed: 4 additions & 4 deletions
@@ -167,10 +167,10 @@ void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT(dst->type == GGML_TYPE_F32);

     if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
-        const float * src0_d = (const float *)src0->data;
-        const float * src1_d = (const float *)src1->data;
+        const float * src0_d = (const float *)tensor_data(src0);
+        const float * src1_d = (const float *)tensor_data(src1);

-        float * dst_d = (float *)dst->data;
+        float * dst_d = (float *)tensor_data(dst);

         if (dim != 3) {
             for (int i3 = 0; i3 < dst->ne[3]; i3++) {
@@ -192,7 +192,7 @@ void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
             dim3 grid_dim(dst->ne[1], dst->ne[2], dst->ne[3]);
             auto launch_kernel = [&](auto dim) {
                 concat_f32_non_cont<dim><<<grid_dim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(
-                    (const char *) src0->data, (const char *) src1->data, (char *) dst->data,
+                    (const char *) tensor_data(src0), (const char *) tensor_data(src1), (char *) tensor_data(dst),
                     src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
                     src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
                     src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
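
Note the two paths this hunk touches: contiguous inputs are handed to the kernel as typed float * pointers, while the non-contiguous fallback passes raw char * bases plus the ne/nb arrays so per-element offsets can be computed in bytes on the device; tensor_data() slots into both call sites unchanged. A minimal sketch of that dispatch test (the helper name is hypothetical, not from the commit):

    #include "ggml.h"

    // Hypothetical predicate mirroring the split above: contiguous tensors
    // can be treated as flat float arrays; anything else must carry its
    // byte strides (nb) into a generic strided kernel.
    static bool concat_needs_strided_kernel(const struct ggml_tensor * src0,
                                            const struct ggml_tensor * src1) {
        return !(ggml_is_contiguous(src0) && ggml_is_contiguous(src1));
    }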
