@@ -582,6 +582,9 @@ struct vk_device_struct {
     vk_pipeline pipeline_pool2d_f32;
     vk_pipeline pipeline_rwkv_wkv6_f32;
     vk_pipeline pipeline_rwkv_wkv7_f32;
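+    // one pipeline per supported SSM scan state size (d_state)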
+    vk_pipeline pipeline_ssm_scan_f32_d16;
+    vk_pipeline pipeline_ssm_scan_f32_d128;
+    vk_pipeline pipeline_ssm_scan_f32_d256;
     vk_pipeline pipeline_opt_step_adamw_f32;
     vk_pipeline pipeline_opt_step_sgd_f32;
     vk_pipeline pipeline_conv2d_f32[CONV_SHAPE_COUNT];
@@ -1087,6 +1090,13 @@ struct vk_op_rwkv_wkv7_push_constants {
     uint32_t C;
     uint32_t H;
 };
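+// Byte strides (ggml nb values) of the sources plus the scan geometry.
+// s_off is the byte offset of the updated-state section within dst, which
+// packs the y output followed by the new ssm states.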
+struct vk_op_ssm_scan_push_constants {
+    uint32_t src0_nb2, src0_nb3, src1_nb2, src1_nb3;
+    uint32_t src2_nb1, src2_nb2, src3_nb1;
+    uint32_t src4_nb2, src4_nb3, src5_nb2, src5_nb3;
+    uint32_t s_off;
+    uint32_t n_head, d_head, n_group, n_tok;
+};
 
 struct vk_op_conv2d_push_constants {
     uint32_t Cout;
@@ -3588,6 +3598,10 @@ static void ggml_vk_load_shaders(vk_device& device) {
 
     ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
 
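+    // All three variants are built from the same ssm_scan_f32 shader; the spec
+    // constants are {d_state, subgroup size, 16}, the trailing 16 presumably
+    // being the shader's split width along the rows.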
+    ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d16, "ssm_scan_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {16, device->subgroup_size, 16}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1);
+
     ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_opt_step_sgd_f32, "opt_step_sgd_f32", opt_step_sgd_f32_len, opt_step_sgd_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
@@ -8087,6 +8101,18 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_rwkv_wkv7_f32;
         }
         return nullptr;
+    case GGML_OP_SSM_SCAN:
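+        // src0 is the state tensor s; its first dimension is d_state, which
+        // selects the shader specialization.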
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            const uint32_t d_state = src0->ne[0];
+            if (d_state == 16) {
+                return ctx->device->pipeline_ssm_scan_f32_d16;
+            } else if (d_state == 128) {
+                return ctx->device->pipeline_ssm_scan_f32_d128;
+            } else if (d_state == 256) {
+                return ctx->device->pipeline_ssm_scan_f32_d256;
+            }
+        }
+        return nullptr;
     case GGML_OP_OPT_STEP_ADAMW:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_opt_step_adamw_f32;
@@ -9027,6 +9053,106 @@ static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx,
     );
 }
 
+static void ggml_vk_ssm_scan(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+    const ggml_tensor * src2 = dst->src[2];
+    const ggml_tensor * src3 = dst->src[3];
+    const ggml_tensor * src4 = dst->src[4];
+    const ggml_tensor * src5 = dst->src[5];
+
+    GGML_ASSERT(dst->buffer != nullptr);
+
+    const uint32_t head_dim = src0->ne[1];
+    const uint32_t n_head = src1->ne[1];
+    const uint32_t n_group = src4->ne[1];
+    const uint32_t n_tok = src1->ne[2];
+    const uint32_t n_seq = src1->ne[3];
+
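+    // Mamba-2 stores A as a single scalar per head, so its row stride collapses
+    // to sizeof(float); Mamba-1 keeps a full d_state-wide row per head.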
+    bool is_mamba2 = (src3->nb[1] == sizeof(float));
+
+    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, dst->op);
+    GGML_ASSERT(pipeline != nullptr);
+
+    if (dryrun) {
+        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
+        return;
+    }
+
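+    // The updated states are written into dst right after the y output, which
+    // has as many elements as src1 (x); s_off is that boundary in bytes.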
+    const int64_t s_off = ggml_nelements(src1) * sizeof(float);
+
+    const vk_op_ssm_scan_push_constants pc = {
+        (uint32_t)src0->nb[2], (uint32_t)src0->nb[3],
+        (uint32_t)src1->nb[2], (uint32_t)src1->nb[3],
+        (uint32_t)src2->nb[1], (uint32_t)src2->nb[2],
+        (uint32_t)src3->nb[1],
+        (uint32_t)src4->nb[2], (uint32_t)src4->nb[3],
+        (uint32_t)src5->nb[2], (uint32_t)src5->nb[3],
+        (uint32_t)s_off,
+        n_head, head_dim, n_group, n_tok
+    };
+
+    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+    ggml_backend_vk_buffer_context * src_buf_ctxs[GGML_MAX_SRC];
+    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
+        src_buf_ctxs[i] = (ggml_backend_vk_buffer_context *)dst->src[i]->buffer->context;
+    }
+
+    vk_buffer d_D = nullptr, d_srcs[GGML_MAX_SRC] = { nullptr };
+    size_t dst_offset = 0, src_offsets[GGML_MAX_SRC] = { 0 };
+    bool dst_uma = false, srcs_uma[GGML_MAX_SRC] = { false };
+
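+    // On UMA devices the tensors may already be in host-visible memory; try to
+    // resolve them there first and fall back to their device buffers otherwise.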
+    if (ctx->device->uma) {
+        for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
+            ggml_vk_host_get(ctx->device, dst->src[i]->data, d_srcs[i], src_offsets[i]);
+            srcs_uma[i] = d_srcs[i] != nullptr;
+        }
+        ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset);
+        dst_uma = d_D != nullptr;
+    }
+
+    if (!dst_uma) {
+        d_D = dst_buf_ctx->dev_buffer;
+        dst_offset = vk_tensor_offset(dst) + dst->view_offs;
+    }
+    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
+        if (!srcs_uma[i]) {
+            d_srcs[i] = src_buf_ctxs[i]->dev_buffer;
+            src_offsets[i] = vk_tensor_offset(dst->src[i]) + dst->src[i]->view_offs;
+        }
+    }
+
+    size_t dst_size = ggml_nbytes(dst);
+    size_t src_sizes[GGML_MAX_SRC];
+    for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) {
+        src_sizes[i] = ggml_nbytes(dst->src[i]);
+    }
+
+    std::array<uint32_t, 3> elements;
+
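+    // Mamba-2: tile the n_head * head_dim rows in chunks of 16 along x, one
+    // sequence per workgroup along y. Mamba-1: one sequence per workgroup along
+    // x, 128 heads per workgroup along y (supports_op enforces divisibility).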
+    if (is_mamba2) {
+        const int splitH = 16;
+        const uint32_t num_workgroups_x = CEIL_DIV(n_head * head_dim, splitH);
+        const uint32_t num_workgroups_y = n_seq;
+        elements = { num_workgroups_x, num_workgroups_y, 1 };
+    } else {
+        const uint32_t num_workgroups_x = n_seq;
+        const uint32_t num_workgroups_y = CEIL_DIV(n_head, 128);
+        elements = { num_workgroups_x, num_workgroups_y, 1 };
+    }
+
+    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
+        vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] },
+        vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] },
+        vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] },
+        vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] },
+        vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] },
+        vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] },
+        vk_subbuffer{ d_srcs[6], src_offsets[6], src_sizes[6] },
+        vk_subbuffer{ d_D, dst_offset, dst_size }
+    }, pc, elements);
+}
+
 static void ggml_vk_op_f32_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_push_constants&& pc, bool dryrun = false) {
     const ggml_tensor * x = dst->src[0];
     const ggml_tensor * g = dst->src[1];
@@ -10859,6 +10985,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_RWKV_WKV6:
     case GGML_OP_RWKV_WKV7:
+    case GGML_OP_SSM_SCAN:
     case GGML_OP_LEAKY_RELU:
     case GGML_OP_FLASH_ATTN_EXT:
     case GGML_OP_OPT_STEP_ADAMW:
@@ -11276,6 +11403,11 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
 
         break;
 
+    case GGML_OP_SSM_SCAN:
+        ggml_vk_ssm_scan(ctx, compute_ctx, node, dryrun);
+
+        break;
+
     case GGML_OP_OPT_STEP_ADAMW:
         ggml_vk_opt_step_adamw(ctx, compute_ctx, node, dryrun);
 
@@ -11387,6 +11519,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_RWKV_WKV6:
     case GGML_OP_RWKV_WKV7:
+    case GGML_OP_SSM_SCAN:
     case GGML_OP_LEAKY_RELU:
     case GGML_OP_REPEAT:
     case GGML_OP_REPEAT_BACK:
@@ -12867,6 +13000,59 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_RWKV_WKV6:
         case GGML_OP_RWKV_WKV7:
             return true;
+        case GGML_OP_SSM_SCAN:
+            {
+                if (ggml_is_quantized(op->src[0]->type) || ggml_is_quantized(op->src[1]->type) || ggml_is_quantized(op->src[2]->type)) {
+                    return false;
+                }
+                if (op->src[3] && ggml_is_quantized(op->src[3]->type)) {
+                    return false;
+                }
+                if (op->src[4] && ggml_is_quantized(op->src[4]->type)) {
+                    return false;
+                }
+                if (op->src[5] && ggml_is_quantized(op->src[5]->type)) {
+                    return false;
+                }
+                if (op->src[6] && op->src[6]->type != GGML_TYPE_I32) {
+                    return false;
+                }
+                if (op->src[0]->type != GGML_TYPE_F32 || op->type != GGML_TYPE_F32) {
+                    return false;
+                }
+
+                const uint32_t d_state = op->src[0]->ne[0];
+                const uint32_t head_dim = op->src[0]->ne[1];
+                const uint32_t n_head = op->src[1]->ne[1];
+                const uint32_t n_group = op->src[4] ? op->src[4]->ne[1] : 1;
+
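+                // Same Mamba-2 detection as in ggml_vk_ssm_scan: A collapses to one
+                // scalar per head. The shape limits mirror the pipeline specializations.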
+                bool is_mamba2 = (op->src[3] && op->src[3]->nb[1] == sizeof(float));
+                if (is_mamba2) {
+                    if ((d_state != 128 && d_state != 256) || head_dim % 16 != 0) {
+                        return false;
+                    }
+                } else {
+                    if (n_head % 128 != 0 || head_dim != 1 || n_group != 1 || d_state != 16) {
+                        return false;
+                    }
+                }
+
+                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+                const vk_device& device = ggml_vk_get_device(ctx->device);
+
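+                // stateC needs splitH rows of d_state floats; the extra d_state floats
+                // are presumably per-subgroup reduction scratch in the shader.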
+                const uint32_t splitH = 16;
+
+                size_t stateC_size = splitH * d_state * sizeof(float);
+                size_t subgroup_sdata_size = d_state * sizeof(float);
+                size_t total_shared_memory = stateC_size + subgroup_sdata_size;
+
+                // Check that the device has enough shared memory to hold the stateC
+                // buffer (plus scratch) when splitH is 16.
+                if (total_shared_memory > device->properties.limits.maxComputeSharedMemorySize) {
+                    return false;
+                }
+
+                return true;
+            }
         case GGML_OP_CONV_TRANSPOSE_1D:
             return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
         case GGML_OP_CONV_2D:
@@ -13211,14 +13397,14 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
 
     struct ggml_context * ggml_ctx = ggml_init(iparams);
 
-    std::array<struct ggml_tensor *, 6> src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
-    std::array<size_t, 6> src_size = {0, 0, 0, 0, 0, 0};
-    std::array<void *, 6> src_buffer = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
-    const char * srci_name[6] = {"src0", "src1", "src2", "src3", "src4", "src5"};
+    std::array<struct ggml_tensor *, GGML_MAX_SRC> src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr};
+    std::array<size_t, GGML_MAX_SRC> src_size = {};
+    std::array<void *, GGML_MAX_SRC> src_buffer = {};
+    const char * srci_name[GGML_MAX_SRC] = {"src0", "src1", "src2", "src3", "src4", "src5", "src6", "src7", "src8", "src9"};
 
     struct ggml_tensor * tensor_clone = nullptr;
 
-    for (int i = 0; i < 6; i++) {
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
         ggml_tensor * srci = tensor->src[i];
         if (fused_rms_norm_mul) {
             rms_norm_idx = tensor->src[0]->op == GGML_OP_RMS_NORM ? 0 : 1;
@@ -13525,6 +13711,9 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
                                    src_clone[2]);
     } else if (tensor->op == GGML_OP_ADD_ID) {
         tensor_clone = ggml_add_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]);
+    } else if (tensor->op == GGML_OP_SSM_SCAN) {
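+        // argument order matches ggml_ssm_scan(ctx, s, x, dt, A, B, C, ids)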
+        tensor_clone = ggml_ssm_scan(ggml_ctx, src_clone[0], src_clone[1], src_clone[2],
+                                     src_clone[3], src_clone[4], src_clone[5], src_clone[6]);
     }
     else {
         std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
@@ -13546,7 +13735,7 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
     memcpy(comp_result, tensor_clone->data, comp_size);
     memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS);
 
-    for (int i = 0; i < 6; i++) {
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
         if (src_buffer[i] != nullptr) {
             free(src_buffer[i]);
         }