@@ -1015,6 +1015,39 @@ struct vk_op_upscale_push_constants {
     float sf0; float sf1; float sf2; float sf3;
 };
 
+struct vk_op_sum_rows_push_constants
+{
+    uint32_t n_cols;
+    uint32_t ne01, ne02;
+    uint32_t nb01, nb02, nb03;
+    uint32_t nb11, nb12, nb13;
+    float weight;
+    uint32_t misalign_offsets;
+    uint32_t ne0_12mp, ne0_12L;
+    uint32_t ne0_1mp, ne0_1L;
+};
+
+vk_op_sum_rows_push_constants vk_op_sum_rows_push_constants_init(const ggml_tensor * src, const ggml_tensor * dst, int64_t n_cols) {
+    uint32_t type_size = (uint32_t)ggml_type_size(src->type);
+    vk_op_sum_rows_push_constants p = {};
+    p.n_cols = (uint32_t)n_cols;
+    p.ne01 = (uint32_t)src->ne[1];
+    p.ne02 = (uint32_t)src->ne[2];
+    p.nb01 = (uint32_t)src->nb[1] / type_size;
+    p.nb02 = (uint32_t)src->nb[2] / type_size;
+    p.nb03 = (uint32_t)src->nb[3] / type_size;
+    p.nb11 = (uint32_t)dst->nb[1] / type_size;
+    p.nb12 = (uint32_t)dst->nb[2] / type_size;
+    p.nb13 = (uint32_t)dst->nb[3] / type_size;
+    p.weight = 1.0f;
+    return p;
+}
+
+template <> void init_pushconst_fastdiv(vk_op_sum_rows_push_constants &p) {
+    init_fastdiv_values(p.ne01*p.ne02, p.ne0_12mp, p.ne0_12L);
+    init_fastdiv_values(p.ne01, p.ne0_1mp, p.ne0_1L);
+}
+
 // Allow pre-recording command buffers
 struct vk_staging_memcpy {
     vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {}
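
The ne0_12mp/ne0_12L and ne0_1mp/ne0_1L pairs feed a magic-number division
scheme: init_fastdiv_values() precomputes a multiplier and shift so the shader
can split a flat row index into (i1, i2, i3) coordinates without integer
division. A minimal host-side sketch of that scheme (Granlund/Montgomery
style; an illustration of the technique, not the file's exact implementation):

    #include <cassert>
    #include <cstdint>

    // Precompute (mp, L) so that n / d becomes a multiply-high plus shift.
    void fastdiv_init(uint32_t d, uint32_t &mp, uint32_t &L) {
        L = 0;                                   // L = ceil(log2(d))
        while (L < 32 && (uint64_t{1} << L) < d) {
            L++;
        }
        mp = (uint32_t)(((uint64_t{1} << 32) * ((uint64_t{1} << L) - d)) / d + 1);
    }

    uint32_t fastdiv(uint32_t n, uint32_t mp, uint32_t L) {
        uint32_t hi = (uint32_t)(((uint64_t)n * mp) >> 32); // umulExtended() on the GPU
        return (uint32_t)(((uint64_t)hi + n) >> L);         // 33-bit add before the shift
    }

    int main() {
        for (uint32_t d : {1u, 3u, 640u, 4096u}) {
            uint32_t mp, L;
            fastdiv_init(d, mp, L);
            for (uint32_t n : {0u, 1u, 999u, 123456789u}) {
                assert(fastdiv(n, mp, L) == n / d);
            }
        }
        return 0;
    }
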
@@ -3128,7 +3161,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
 
     ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 
-    ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_sum_rows_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_count_equal_i32, "count_equal_i32", count_equal_i32_len, count_equal_i32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, { device->subgroup_size }, 1);
 
@@ -7249,6 +7282,7 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         return nullptr;
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_sum_rows_f32;
         }
@@ -7387,6 +7421,9 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_IM2COL:
     case GGML_OP_SET_ROWS:
+    case GGML_OP_SUM:
+    case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
         return true;
     default:
         return false;
@@ -7421,6 +7458,16 @@ template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk
     GGML_UNUSED(src2);
 }
 
+template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_sum_rows_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
+    const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
+    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
+
+    p.misalign_offsets = (a_offset << 16) | d_offset;
+
+    GGML_UNUSED(src1);
+    GGML_UNUSED(src2);
+}
+
 template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_binary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst) {
     const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
     const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
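
misalign_offsets packs both element offsets into a single 32-bit push constant:
the src0 offset in the high 16 bits, the dst offset in the low 16. Sixteen bits
per offset is plenty, since each misalignment is smaller than
minStorageBufferOffsetAlignment, which the Vulkan spec caps at 256 bytes. A
round-trip sketch of the packing; the unpack helpers mirror what the consuming
shader would do, and the names here are illustrative:

    #include <cassert>
    #include <cstdint>

    // Pack the two misalignment element-offsets into one push-constant word.
    uint32_t pack_misalign(uint32_t a_offset, uint32_t d_offset) {
        assert(a_offset < (1u << 16) && d_offset < (1u << 16)); // each must fit in 16 bits
        return (a_offset << 16) | d_offset;
    }

    // Shader-side recovery (GLSL would use the same expressions).
    uint32_t get_aoffset(uint32_t misalign_offsets) { return misalign_offsets >> 16; }
    uint32_t get_doffset(uint32_t misalign_offsets) { return misalign_offsets & 0xFFFF; }

    int main() {
        uint32_t packed = pack_misalign(36, 12);
        assert(get_aoffset(packed) == 36 && get_doffset(packed) == 12);
        return 0;
    }
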
@@ -7571,10 +7618,10 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
         d_buf_offset &= ~(ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1);
 
     if (op_supports_incontiguous) {
-        x_sz = ggml_nbytes(src0);
-        y_sz = use_src1 ? ggml_nbytes(src1) : 0;
-        z_sz = use_src2 ? ggml_nbytes(src2) : 0;
-        d_sz = ggml_nbytes(dst);
+        x_sz = ggml_nbytes(src0) + get_misalign_bytes(ctx, src0);
+        y_sz = use_src1 ? ggml_nbytes(src1) + get_misalign_bytes(ctx, src1) : 0;
+        z_sz = use_src2 ? ggml_nbytes(src2) + get_misalign_bytes(ctx, src2) : 0;
+        d_sz = ggml_nbytes(dst) + get_misalign_bytes(ctx, dst);
 
         if (x_buf_offset + x_sz >= d_X->size) {
             x_sz = VK_WHOLE_SIZE;
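
The added get_misalign_bytes() terms compensate for the descriptor offsets
being rounded down to minStorageBufferOffsetAlignment a few lines earlier: the
bound range must grow by the same number of bytes, or the tail of the tensor
would fall outside it. A worked example under an assumed 64-byte alignment:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t align      = 64;    // assumed minStorageBufferOffsetAlignment
        const uint64_t buf_offset = 100;   // true byte offset of the tensor
        const uint64_t nbytes     = 512;   // ggml_nbytes() of the tensor

        const uint64_t desc_offset = buf_offset & ~(align - 1); // 64: offset actually bound
        const uint64_t misalign    = buf_offset - desc_offset;  // 36: handled in the shader
        const uint64_t bind_size   = nbytes + misalign;         // 548: covers the whole tensor

        // The bound range [desc_offset, desc_offset + bind_size) must reach the
        // tensor's last byte at buf_offset + nbytes - 1.
        assert(desc_offset + bind_size == buf_offset + nbytes);
        return 0;
    }
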
@@ -7602,6 +7649,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     case GGML_OP_SOFT_MAX:
     case GGML_OP_SOFT_MAX_BACK:
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
     case GGML_OP_ARGMAX:
         {
             const uint32_t nr = ggml_nrows(src0);
@@ -8588,11 +8636,19 @@ static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, c
 }
 
 static void ggml_vk_sum(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun);
+    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, ggml_nelements(src0));
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM, p, dryrun);
 }
 
 static void ggml_vk_sum_rows(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
-    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, { (uint32_t)src0->ne[0], 0, 0.0f, 0.0f }, dryrun);
+    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SUM_ROWS, p, dryrun);
+}
+
+static void ggml_vk_mean(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+    vk_op_sum_rows_push_constants p = vk_op_sum_rows_push_constants_init(src0, dst, src0->ne[0]);
+    p.weight = 1.0f / (float)src0->ne[0];
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_MEAN, p, dryrun);
 }
 
 static void ggml_vk_argmax(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
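
All three wrappers drive the same sum_rows pipeline and differ only in n_cols
and weight: GGML_OP_SUM treats the whole tensor as one row (n_cols =
ggml_nelements), GGML_OP_SUM_ROWS reduces ne[0] elements with weight 1.0f, and
GGML_OP_MEAN reuses that with weight 1/ne[0]. A CPU sketch of the per-row
contract the shader is expected to satisfy (reference semantics only, not the
GLSL kernel):

    #include <cstdint>

    // One output element: sum n_cols inputs, then scale by weight
    // (1.0f for SUM / SUM_ROWS, 1.0f / ne0 for MEAN).
    float reduce_row(const float * row, uint32_t n_cols, float weight) {
        float acc = 0.0f;
        for (uint32_t i = 0; i < n_cols; ++i) {
            acc += row[i];
        }
        return acc * weight;
    }
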
@@ -9815,6 +9871,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_ARGSORT:
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
     case GGML_OP_ARGMAX:
     case GGML_OP_COUNT_EQUAL:
     case GGML_OP_IM2COL:
@@ -9884,6 +9941,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_ARGSORT:
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
     case GGML_OP_ARGMAX:
     case GGML_OP_COUNT_EQUAL:
     case GGML_OP_IM2COL:
@@ -10087,6 +10145,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_SUM_ROWS:
         ggml_vk_sum_rows(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_MEAN:
+        ggml_vk_mean(ctx, compute_ctx, src0, node, dryrun);
+
         break;
     case GGML_OP_ARGMAX:
         ggml_vk_argmax(ctx, compute_ctx, src0, node, dryrun);
@@ -10246,6 +10308,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_ARGSORT:
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
     case GGML_OP_ARGMAX:
     case GGML_OP_COUNT_EQUAL:
     case GGML_OP_IM2COL:
@@ -11483,8 +11546,11 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_DIAG_MASK_INF:
         case GGML_OP_SOFT_MAX:
         case GGML_OP_SOFT_MAX_BACK:
+            return true;
         case GGML_OP_SUM:
         case GGML_OP_SUM_ROWS:
+        case GGML_OP_MEAN:
+            return op->src[0]->type == GGML_TYPE_F32 && ggml_is_contiguous_rows(op->src[0]);
         case GGML_OP_ARGMAX:
         case GGML_OP_COUNT_EQUAL:
         case GGML_OP_IM2COL:
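
Note that the new predicate is weaker than full contiguity:
ggml_is_contiguous_rows() only requires the elements within a row to be densely
packed, while the row, plane and batch strides may be arbitrary, which is
exactly the layout the nb01/nb02/nb03 push constants describe. A sketch of the
check, assuming it matches ggml's definition:

    #include "ggml.h"

    // Assumed semantics of ggml_is_contiguous_rows(): rows are packed, but
    // nb[1], nb[2] and nb[3] are unconstrained.
    static bool is_contiguous_rows_sketch(const struct ggml_tensor * t) {
        return t->ne[0] == 1 ||                      // single-element rows are trivially packed
               t->nb[0] == ggml_type_size(t->type);  // element stride == element size
    }
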
@@ -12043,6 +12109,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         tensor_clone = ggml_sum(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_SUM_ROWS) {
         tensor_clone = ggml_sum_rows(ggml_ctx, src_clone[0]);
+    } else if (tensor->op == GGML_OP_MEAN) {
+        tensor_clone = ggml_mean(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_ARGMAX) {
         tensor_clone = ggml_argmax(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_COUNT_EQUAL) {