@@ -345,6 +345,9 @@ enum vk_conv_shapes {
     CONV_SHAPE_COUNT,
 };
 
+static constexpr uint32_t num_argsort_pipelines = 11;
+static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1);
+
 struct vk_device_struct {
     std::recursive_mutex mutex;
 
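A quick sanity check of how the two new constants relate (standalone sketch, not part of the patch): variant `i` runs with a workgroup of `1u << i` invocations, so 11 variants cover widths 1 through 1024, and `max_argsort_cols` keeps the same 1024-column ceiling the old single fixed-size pipeline had.

```cpp
#include <cstdint>

// Standalone check of the constants above; mirrors the values in the patch.
static constexpr uint32_t num_argsort_pipelines = 11;
static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines - 1);

// Variant i uses a workgroup of (1u << i) invocations, i = 0..10.
static_assert(max_argsort_cols == 1024, "same ceiling as the old fixed 1024-wide pipeline");
```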
@@ -505,7 +508,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16;
     vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
     vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
-    vk_pipeline pipeline_argsort_f32;
+    vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
     vk_pipeline pipeline_sum_rows_f32;
     vk_pipeline pipeline_argmax_f32;
     vk_pipeline pipeline_count_equal_i32;
@@ -870,7 +873,6 @@ struct vk_op_soft_max_push_constants {
 
 struct vk_op_argsort_push_constants {
     uint32_t ncols;
-    uint32_t ncols_pad;
     int32_t order;
 };
 
@@ -3099,7 +3101,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
         ggml_vk_create_pipeline(device, device->pipeline_rope_vision_f16, "rope_vision_f16", rope_vision_f16_len, rope_vision_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
     }
 
-    ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
+    for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
+        ggml_vk_create_pipeline(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1u<<i, 1, 1}, {1u<<i, i}, 1, true);
+    }
 
     ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 
@@ -7160,7 +7164,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         }
     case GGML_OP_ARGSORT:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
-            return ctx->device->pipeline_argsort_f32;
+            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
+            return ctx->device->pipeline_argsort_f32[idx];
         }
         return nullptr;
     case GGML_OP_SUM:
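The selected variant is the one whose workgroup width is the smallest power of two that covers the whole row. A few worked values (sketch only, reusing the same `ceilf(log2f(...))` expression as the hunk above):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Sketch of the selection logic above (standalone, not part of the patch):
// ncols -> index of the smallest pipeline variant whose workgroup covers the row.
int main() {
    for (uint32_t ncols : {1u, 2u, 33u, 512u, 513u, 1000u, 1024u}) {
        uint32_t idx = (uint32_t)ceilf(log2f(float(ncols)));
        printf("ncols=%4u -> argsort_f32_%u (workgroup width %u)\n",
               ncols, idx, 1u << idx);
    }
    return 0;
}
```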
@@ -8485,16 +8490,8 @@ static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, c
 
     uint32_t ncols = src0->ne[0];
 
-    uint32_t ncols_pad = 1;
-    while (ncols_pad < ncols) {
-        ncols_pad *= 2;
-    }
-
-    GGML_ASSERT(ncols_pad <= 1024);
-
     ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
         ncols,
-        ncols_pad,
         op_params[0],
     }, dryrun);
 }
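The `ncols_pad` push constant can be dropped because each variant's workgroup width, fixed at pipeline-creation time through the specialization constants, is already the padded power-of-two the old host-side loop computed per dispatch. A small sketch (assuming the 1024-column limit from `max_argsort_cols`) checking that equivalence:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

// Sketch (not part of the patch): for every supported column count, the width
// baked into the selected pipeline (1u << ceil(log2(ncols))) equals the
// ncols_pad value the removed host-side loop used to compute.
int main() {
    constexpr uint32_t max_argsort_cols = 1024;
    for (uint32_t ncols = 1; ncols <= max_argsort_cols; ++ncols) {
        uint32_t ncols_pad = 1;                 // old host-side padding
        while (ncols_pad < ncols) {
            ncols_pad *= 2;
        }
        uint32_t idx = (uint32_t)ceilf(log2f(float(ncols)));
        assert((1u << idx) == ncols_pad);       // new: fixed per pipeline variant
    }
    return 0;
}
```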
@@ -11367,6 +11364,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_OPT_STEP_ADAMW:
         case GGML_OP_OPT_STEP_SGD:
             return op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_ARGSORT:
+            return op->ne[0] <= max_argsort_cols;
         case GGML_OP_UPSCALE:
         case GGML_OP_ACC:
         case GGML_OP_CONCAT:
@@ -11376,7 +11375,6 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_DIAG_MASK_INF:
         case GGML_OP_SOFT_MAX:
         case GGML_OP_SOFT_MAX_BACK:
-        case GGML_OP_ARGSORT:
         case GGML_OP_SUM:
         case GGML_OP_SUM_ROWS:
         case GGML_OP_ARGMAX: