@@ -345,6 +345,9 @@ enum vk_conv_shapes {
     CONV_SHAPE_COUNT,
 };
 
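+// One argsort pipeline per power-of-two size; the largest variant covers 1 << (num_argsort_pipelines - 1) = 1024 columns per row.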
+static constexpr uint32_t num_argsort_pipelines = 11;
+static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1);
+
 struct vk_device_struct {
     std::recursive_mutex mutex;
 
@@ -461,6 +464,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bilinear_ac_f32;
     vk_pipeline pipeline_scale_f32;
     vk_pipeline pipeline_sqr_f32;
+    vk_pipeline pipeline_sqrt_f32;
     vk_pipeline pipeline_sin_f32;
     vk_pipeline pipeline_cos_f32;
     vk_pipeline pipeline_clamp_f32;
@@ -505,7 +509,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
     vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
     vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
-    vk_pipeline pipeline_argsort_f32;
+    vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
     vk_pipeline pipeline_sum_rows_f32;
     vk_pipeline pipeline_argmax_f32;
     vk_pipeline pipeline_count_equal_i32;
@@ -870,7 +874,6 @@ struct vk_op_soft_max_push_constants {
 
 struct vk_op_argsort_push_constants {
     uint32_t ncols;
-    uint32_t ncols_pad;
     int32_t order;
 };
 
@@ -3029,6 +3032,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
@@ -3099,7 +3103,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
         ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_len, rope_vision_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
     }
 
-    ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
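+    // Variant i handles up to 1<<i columns; the column capacity (1<<i) and its log2 (i) are passed as specialization constants.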
+    for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
+        ggml_vk_create_pipeline(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1u<<i, 1, 1}, {1u<<i, i}, 1, true);
+    }
 
     ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 
@@ -6977,6 +6983,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_sqr_f32;
         }
         return nullptr;
+    case GGML_OP_SQRT:
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_sqrt_f32;
+        }
+        return nullptr;
     case GGML_OP_SIN:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_sin_f32;
@@ -7160,7 +7171,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         }
     case GGML_OP_ARGSORT:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
-            return ctx->device->pipeline_argsort_f32;
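+            // Pick the smallest variant whose power-of-two capacity covers the row length.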
+            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
+            return ctx->device->pipeline_argsort_f32[idx];
         }
         return nullptr;
     case GGML_OP_SUM:
@@ -7285,6 +7297,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
     case GGML_OP_CONCAT:
     case GGML_OP_UPSCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -7590,6 +7603,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     case GGML_OP_MUL:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -8237,6 +8251,10 @@ static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
 
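+// Element-wise square root; follows the same unary f32 dispatch path as sqr/sin/cos.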
+static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst), dryrun);
+}
+
 static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
@@ -8485,16 +8503,8 @@ static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, c
 
     uint32_t ncols = src0->ne[0];
 
-    uint32_t ncols_pad = 1;
-    while (ncols_pad < ncols) {
-        ncols_pad *= 2;
-    }
-
-    GGML_ASSERT(ncols_pad <= 1024);
-
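+    // The padded (power-of-two) size is fixed per pipeline variant, so only ncols and the sort order are passed.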
     ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
         ncols,
-        ncols_pad,
         op_params[0],
     }, dryrun);
 }
@@ -9700,6 +9710,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9769,6 +9780,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9870,6 +9882,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_SQR:
         ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_SQRT:
+        ggml_vk_sqrt(ctx, compute_ctx, src0, node, dryrun);
+
         break;
     case GGML_OP_SIN:
         ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
@@ -10121,6 +10137,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -11360,13 +11377,16 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_SILU_BACK:
         case GGML_OP_RMS_NORM_BACK:
         case GGML_OP_SQR:
+        case GGML_OP_SQRT:
         case GGML_OP_SIN:
         case GGML_OP_COS:
         case GGML_OP_CLAMP:
         case GGML_OP_LEAKY_RELU:
         case GGML_OP_OPT_STEP_ADAMW:
         case GGML_OP_OPT_STEP_SGD:
             return op->src[0]->type == GGML_TYPE_F32;
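+        // argsort only has pipelines for rows up to max_argsort_cols (1024) columns.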
+        case GGML_OP_ARGSORT:
+            return op->ne[0] <= max_argsort_cols;
         case GGML_OP_UPSCALE:
         case GGML_OP_ACC:
         case GGML_OP_CONCAT:
@@ -11376,7 +11396,6 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_DIAG_MASK_INF:
         case GGML_OP_SOFT_MAX:
         case GGML_OP_SOFT_MAX_BACK:
-        case GGML_OP_ARGSORT:
         case GGML_OP_SUM:
         case GGML_OP_SUM_ROWS:
         case GGML_OP_ARGMAX:
@@ -11803,6 +11822,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
     } else if (tensor->op == GGML_OP_SQR) {
         tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
+    } else if (tensor->op == GGML_OP_SQRT) {
+        tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_SIN) {
         tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_COS) {