@@ -464,6 +464,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bilinear_ac_f32;
     vk_pipeline pipeline_scale_f32;
     vk_pipeline pipeline_sqr_f32;
+    vk_pipeline pipeline_sqrt_f32;
     vk_pipeline pipeline_sin_f32;
     vk_pipeline pipeline_cos_f32;
     vk_pipeline pipeline_clamp_f32;
@@ -3031,6 +3032,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_sqr_f32, "sqr_f32", sqr_f32_len, sqr_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
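Note: the `sqrt_f32_len`/`sqrt_f32_data` arrays referenced by the new `ggml_vk_create_pipeline` call are emitted by the shader generator, so the change also needs an element-wise square-root compute shader plus a matching `string_to_spv` entry in vulkan-shaders-gen.cpp (neither is shown in this diff). A minimal sketch of such a shader, modeled on the neighbouring unary shaders like `sqr_f32`; the file name and the helper names pulled in from `generic_unary_head.comp` are assumptions, not taken from this diff:

```glsl
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

// Workgroup size matches the {512, 1, 1} passed to ggml_vk_create_pipeline above.
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

void main() {
    const uint idx = get_idx();

    // One invocation per element; the unary push constants carry the element count.
    if (idx >= p.ne) {
        return;
    }

    // Element-wise square root through the generic unary indexing helpers.
    data_d[get_doffset() + dst_idx(idx)] = D_TYPE(sqrt(FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)])));
}
```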
@@ -6981,6 +6983,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_sqr_f32;
         }
         return nullptr;
+    case GGML_OP_SQRT:
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_sqrt_f32;
+        }
+        return nullptr;
     case GGML_OP_SIN:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_sin_f32;
@@ -7290,6 +7297,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
     case GGML_OP_CONCAT:
     case GGML_OP_UPSCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -7595,6 +7603,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     case GGML_OP_MUL:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -8242,6 +8251,10 @@ static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQR, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
 
+static void ggml_vk_sqrt(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
+    ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SQRT, vk_op_unary_push_constants_init(src0, dst), dryrun);
+}
+
 static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
     ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_SIN, vk_op_unary_push_constants_init(src0, dst), dryrun);
 }
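The new wrapper mirrors `ggml_vk_sqr` and reuses the generic unary push constants, so the op it dispatches is just `ggml_sqrt`: element-wise square root, F32 in and F32 out. For illustration, a minimal host-side sketch of those semantics using the public ggml API on the CPU backend; it assumes `ggml_graph_compute_with_ctx` is declared in `ggml-cpu.h`, as in recent trees (older trees have it in `ggml.h`):

```cpp
#include "ggml.h"
#include "ggml-cpu.h"   // ggml_graph_compute_with_ctx lives here in recent trees

#include <stdio.h>

int main(void) {
    struct ggml_init_params params = { /*mem_size*/ 16*1024*1024, /*mem_buffer*/ NULL, /*no_alloc*/ false };
    struct ggml_context * ctx = ggml_init(params);

    // 4-element F32 input: 1, 4, 9, 16
    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    float * xd = (float *) x->data;
    xd[0] = 1.0f; xd[1] = 4.0f; xd[2] = 9.0f; xd[3] = 16.0f;

    // Element-wise square root, F32 -> F32: the same op the new Vulkan pipeline implements.
    struct ggml_tensor * y = ggml_sqrt(ctx, x);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, y);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads*/ 1);

    const float * yd = (const float *) y->data;
    printf("%g %g %g %g\n", yd[0], yd[1], yd[2], yd[3]); // expected: 1 2 3 4

    ggml_free(ctx);
    return 0;
}
```

On a Vulkan build, the same path is exercised and compared against the CPU backend by `test-backend-ops`.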
@@ -9697,6 +9710,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9766,6 +9780,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -9867,6 +9882,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_SQR:
         ggml_vk_sqr(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_SQRT:
+        ggml_vk_sqrt(ctx, compute_ctx, src0, node, dryrun);
+
         break;
     case GGML_OP_SIN:
         ggml_vk_sin(ctx, compute_ctx, src0, node, dryrun);
@@ -10118,6 +10137,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_UPSCALE:
     case GGML_OP_SCALE:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -11357,6 +11377,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
     case GGML_OP_SILU_BACK:
     case GGML_OP_RMS_NORM_BACK:
     case GGML_OP_SQR:
+    case GGML_OP_SQRT:
     case GGML_OP_SIN:
     case GGML_OP_COS:
     case GGML_OP_CLAMP:
@@ -11801,6 +11822,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         tensor_clone = ggml_scale_bias(ggml_ctx, src_clone[0], params[0], params[1]);
     } else if (tensor->op == GGML_OP_SQR) {
         tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]);
+    } else if (tensor->op == GGML_OP_SQRT) {
+        tensor_clone = ggml_sqrt(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_SIN) {
         tensor_clone = ggml_sin(ggml_ctx, src_clone[0]);
     } else if (tensor->op == GGML_OP_COS) {