From 30ba139e5b973f056ff8c51347d4715ab0e3df93 Mon Sep 17 00:00:00 2001
From: Reese Levine
Date: Wed, 30 Jul 2025 12:33:06 -0700
Subject: [PATCH 01/24] Add parameter buffer pool, batching of submissions,
 refactor command building/submission

---
 ggml/src/ggml-webgpu/ggml-webgpu.cpp | 652 +++++++++++++--------
 1 file changed, 321 insertions(+), 331 deletions(-)

diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index c5abc69343357..e35c865ea7c61 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -8,7 +8,6 @@
 #include "ggml-wgsl-shaders.hpp"

 #include
-#include
 #include
 #include

@@ -20,26 +19,78 @@

 /* Constants */

+#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 16
 #define WEBGPU_MUL_MAT_WG_SIZE 64
-#define WEBGPU_MUL_MAT_PARAMS_SIZE (13 * sizeof(uint32_t)) // M, N, K, batch sizes, broadcasts
-#define WEBGPU_CPY_PARAMS_SIZE (15 * sizeof(uint32_t)) // strides and offsets
+#define WEBGPU_NUM_PARAM_BUFS 100
+#define WEBGPU_PARAMS_BUF_SIZE_BYTES 256
 #define WEBGPU_STORAGE_BUF_BINDING_MULT 4 // a storage buffer binding size must be a multiple of 4

 /* End Constants */

 // This is a "fake" base pointer, since WebGPU buffers do not have pointers to their locations.
 static void* const webgpu_ptr_base = (void*)(uintptr_t)0x1000; // NOLINT

 // Always returns the base offset of a tensor, regardless of views.
-static uint64_t webgpu_tensor_offset(const ggml_tensor * tensor) {
+static uint64_t webgpu_tensor_offset(const ggml_tensor* tensor) {
     if (tensor->view_src) {
-        return (uint8_t *) tensor->view_src->data - (uint8_t *) webgpu_ptr_base;
+        return (uint8_t*)tensor->view_src->data - (uint8_t*)webgpu_ptr_base;
     }
-    return (uint8_t *) tensor->data - (uint8_t *) webgpu_ptr_base;
+    return (uint8_t*)tensor->data - (uint8_t*)webgpu_ptr_base;
 }

 /* Struct definitions */

+// Forward reference
+static void ggml_webgpu_create_buffer(wgpu::Device& device, wgpu::Buffer& buffer, size_t size, wgpu::BufferUsage usage, const char* label);
+
+struct webgpu_param_bufs {
+    wgpu::Buffer host_buf;
+    wgpu::Buffer dev_buf;
+};
+
+// Holds a pool of parameter buffers for WebGPU operations
+struct webgpu_param_buf_pool {
+    std::vector<webgpu_param_bufs> free;
+
+    std::mutex mutex;
+    std::condition_variable cv;
+
+    void init(wgpu::Device device) {
+        for (int i = 0; i < WEBGPU_NUM_PARAM_BUFS; i++) {
+            wgpu::Buffer host_buf;
+            wgpu::Buffer dev_buf;
+            ggml_webgpu_create_buffer(device, host_buf, WEBGPU_PARAMS_BUF_SIZE_BYTES, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite, "ggml_webgpu_host_params_buf");
+            ggml_webgpu_create_buffer(device, dev_buf, WEBGPU_PARAMS_BUF_SIZE_BYTES, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, "ggml_webgpu_dev_params_buf");
+            free.push_back({ host_buf, dev_buf });
+        }
+    }
+
+    webgpu_param_bufs alloc_bufs() {
+        std::unique_lock lock(mutex);
+        cv.wait(lock, [this] {
+            return !free.empty();
+        });
+        webgpu_param_bufs bufs = free.back();
+        free.pop_back();
+        return bufs;
+    }
+
+    void free_bufs(const webgpu_param_bufs& bufs) {
+        std::lock_guard lock(mutex);
+        free.push_back(bufs);
+        cv.notify_one();
+    }
+
+    void cleanup() {
+        std::lock_guard lock(mutex);
+        for (auto& bufs : free) {
+            bufs.host_buf.Destroy();
+            bufs.dev_buf.Destroy();
+        }
+        free.clear();
+    }
+};
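// Usage sketch (illustrative only; the concrete call site is
// ggml_backend_webgpu_build_and_enqueue further down): each dispatch borrows
// one host/device pair, blocking in alloc_bufs() once all
// WEBGPU_NUM_PARAM_BUFS pairs are in flight, and the pair is returned from the
// queue-completion callback:
//
//     webgpu_param_bufs bufs = pool.alloc_bufs();   // may block on the cv
//     /* map bufs.host_buf, write params, copy into bufs.dev_buf, submit */
//     pool.free_bufs(bufs);                         // wakes one waiter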
+// All the base objects needed to run operations on a WebGPU device
 struct webgpu_context_struct {
     wgpu::Instance instance;
     wgpu::Adapter adapter;
     wgpu::Device device;
     wgpu::Queue queue;
     wgpu::Limits limits;
     wgpu::SupportedFeatures features;

-    std::mutex mutex;
-    bool device_initialized = false;
+    std::recursive_mutex submit_mutex;
+    std::mutex get_tensor_mutex;
+    std::mutex init_mutex;
+    bool device_init = false;
+
+    // Parameter buffer pool
+    webgpu_param_buf_pool param_buf_pool;

-    // pipelines and parameter buffers
-    // TODO: reuse params buffers for different pipelines when possible
     wgpu::ComputePipeline memset_pipeline;
-    wgpu::Buffer memset_params_dev_buf;
-    wgpu::Buffer memset_params_host_buf;
     wgpu::ComputePipeline mul_mat_pipeline;
-    wgpu::Buffer mul_mat_params_dev_buf;
-    wgpu::Buffer mul_mat_params_host_buf;
     wgpu::ComputePipeline cpy_pipeline;
-    wgpu::Buffer cpy_params_dev_buf;
-    wgpu::Buffer cpy_params_host_buf;

     size_t memset_bytes_per_thread;

     // Staging buffer for reading data from the GPU
     wgpu::Buffer get_tensor_staging_buf;
+
+    // Command buffers which need to be submitted
+    std::vector<wgpu::CommandBuffer> staged_command_bufs;
+    // Parameter buffers associated with the staged command buffers
+    std::vector<webgpu_param_bufs> staged_param_bufs;
 };

 typedef std::shared_ptr<webgpu_context_struct> webgpu_context;

@@ -76,7 +129,7 @@ struct ggml_backend_webgpu_reg_context {
     webgpu_context webgpu_ctx;

     size_t device_count;
-    const char * name;
+    const char* name;
 };

 struct ggml_backend_webgpu_device_context {
@@ -98,7 +151,7 @@ struct ggml_backend_webgpu_buffer_context {
     wgpu::Buffer buffer;

     ggml_backend_webgpu_buffer_context(webgpu_context ctx, wgpu::Buffer buf) :
-        webgpu_ctx(ctx), buffer(buf) {
+        webgpu_ctx(std::move(ctx)), buffer(std::move(buf)) {
     }
 };

@@ -106,7 +159,7 @@

 /* WebGPU object initializations */

-static void ggml_webgpu_create_pipeline(wgpu::Device &device, wgpu::ComputePipeline &pipeline, const char * shader_code, const char * label, const std::vector<wgpu::ConstantEntry> &constants = {}) {
+static void ggml_webgpu_create_pipeline(wgpu::Device& device, wgpu::ComputePipeline& pipeline, const char* shader_code, const char* label, const std::vector<wgpu::ConstantEntry>& constants = {}) {
     WEBGPU_LOG_DEBUG("ggml_webgpu_create_pipeline()");
     wgpu::ShaderSourceWGSL shader_source;
     shader_source.code = shader_code;
@@ -126,7 +179,7 @@ static void ggml_webgpu_create_pipeline(wgpu::Device &device, wgpu::ComputePipel
     pipeline = device.CreateComputePipeline(&pipeline_desc);
 }

-static void ggml_webgpu_create_buffer(wgpu::Device &device, wgpu::Buffer &buffer, size_t size, wgpu::BufferUsage usage, const char* label) {
+static void ggml_webgpu_create_buffer(wgpu::Device& device, wgpu::Buffer& buffer, size_t size, wgpu::BufferUsage usage, const char* label) {
     WEBGPU_LOG_DEBUG("ggml_webgpu_create_buffer()");

     wgpu::BufferDescriptor buffer_desc;
@@ -142,9 +195,41 @@ static void ggml_webgpu_create_buffer(wgpu::Device &device, wgpu::Buffer &buffer

 /** WebGPU Actions */

+static void ggml_backend_webgpu_wait_on_submission(webgpu_context& ctx) {
+    // Wait for the queue to finish processing all commands
+    ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous,
+        [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) {
+            if (status != wgpu::QueueWorkDoneStatus::Success) {
+                GGML_LOG_ERROR("ggml_webgpu: Failed to wait on queue: %s\n", message.data);
+            }
+        }),
+        UINT64_MAX
+    );
+}
+static void ggml_backend_webgpu_submit_queue(webgpu_context& ctx) {
+    std::lock_guard lock(ctx->submit_mutex);
+
+    ctx->queue.Submit(ctx->staged_command_bufs.size(), ctx->staged_command_bufs.data());
+    ctx->staged_command_bufs.clear();
+    std::vector<webgpu_param_bufs> staged_param_bufs = std::move(ctx->staged_param_bufs);
+    // Free the staged parameter buffers once the submission completes
+    ctx->queue.OnSubmittedWorkDone(
+        wgpu::CallbackMode::AllowSpontaneous,
+        [ctx, staged_param_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) {
+            if (status != wgpu::QueueWorkDoneStatus::Success) {
+                GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data);
+            }
+            // Free the staged parameter buffers
+            for (const auto& bufs : staged_param_bufs) {
+                ctx->param_buf_pool.free_bufs(bufs);
+            }
+        });
+}
+
+static void ggml_backend_webgpu_map_buffer(webgpu_context& ctx, wgpu::Buffer& buffer, wgpu::MapMode mode, size_t offset, size_t size) {
     ctx->instance.WaitAny(buffer.MapAsync(
-        mode, offset, size, wgpu::CallbackMode::WaitAnyOnly,
+        mode, offset, size, wgpu::CallbackMode::AllowSpontaneous,
         [](wgpu::MapAsyncStatus status, wgpu::StringView message) {
             if (status != wgpu::MapAsyncStatus::Success) {
                 GGML_LOG_ERROR("ggml_webgpu: Failed to map buffer: %s\n", message.data);
@@ -154,282 +239,201 @@ static void ggml_backend_webgpu_map_buffer(webgpu_context ctx, wgpu::Buffer buff
     );
 }

-static void ggml_backend_webgpu_buffer_memset(webgpu_context ctx, wgpu::Buffer buf, uint32_t value, size_t offset, size_t size) {
-    std::lock_guard lock(ctx->mutex);
-    wgpu::Device device = ctx->device;
+static void ggml_backend_webgpu_build_and_enqueue(webgpu_context& ctx, wgpu::ComputePipeline& pipeline, std::vector<uint32_t> params, std::vector<wgpu::BindGroupEntry> bind_group_entries, uint32_t wg_x, bool submit_imm = false) {
+    webgpu_param_bufs params_bufs = ctx->param_buf_pool.alloc_bufs();

-    // map the host parameters buffer
-    ggml_backend_webgpu_map_buffer(ctx, ctx->memset_params_host_buf, wgpu::MapMode::Write, 0, ctx->memset_params_host_buf.GetSize());
-    uint32_t * params = (uint32_t *) ctx->memset_params_host_buf.GetMappedRange();
+    ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf,
+        wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize());
+    uint32_t* _params = (uint32_t*)params_bufs.host_buf.GetMappedRange();
+    for (size_t i = 0; i < params.size(); i++) {
+        _params[i] = params[i];
+    };

-    params[0] = (uint32_t)offset;
-    params[1] = (uint32_t)size;
-    params[2] = value;
-    ctx->memset_params_host_buf.Unmap();
+    params_bufs.host_buf.Unmap();

-    wgpu::BindGroupEntry entries[2];
-    entries[0].binding = 0; // binding for the buffer to memset
-    entries[0].buffer = buf;
-    entries[0].offset = 0;
-    entries[0].size = buf.GetSize();
-    entries[1].binding = 1; // binding for the parameters
-    entries[1].buffer = ctx->memset_params_dev_buf;
-    entries[1].offset = 0;
-    entries[1].size = ctx->memset_params_dev_buf.GetSize();
+    uint32_t params_bufs_binding_num = bind_group_entries.size();
+    bind_group_entries.push_back({
+        .binding = params_bufs_binding_num,
+        .buffer = params_bufs.dev_buf,
+        .offset = 0,
+        .size = params_bufs.dev_buf.GetSize()
+    });

     wgpu::BindGroupDescriptor bind_group_desc;
-    bind_group_desc.layout = ctx->memset_pipeline.GetBindGroupLayout(0);
-    bind_group_desc.entryCount = 2;
-    bind_group_desc.label = "ggml_memset";
-    bind_group_desc.entries = entries;
-    wgpu::BindGroup bind_group = device.CreateBindGroup(&bind_group_desc);
+    bind_group_desc.layout = pipeline.GetBindGroupLayout(0);
+    bind_group_desc.entryCount = bind_group_entries.size();
+    bind_group_desc.entries = bind_group_entries.data();
+    wgpu::BindGroup bind_group = ctx->device.CreateBindGroup(&bind_group_desc);
-    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::CommandEncoder encoder = ctx->device.CreateCommandEncoder();
     encoder.CopyBufferToBuffer(
-        ctx->memset_params_host_buf, 0,
-        ctx->memset_params_dev_buf, 0,
-        ctx->memset_params_dev_buf.GetSize()
+        params_bufs.host_buf, 0,
+        params_bufs.dev_buf, 0,
+        params_bufs.dev_buf.GetSize()
     );
     wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-    pass.SetPipeline(ctx->memset_pipeline);
+    pass.SetPipeline(pipeline);
     pass.SetBindGroup(0, bind_group);
-    size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread;
-    pass.DispatchWorkgroups(((size + 3) + bytes_per_wg - 1) / bytes_per_wg, 1, 1);
+    pass.DispatchWorkgroups(wg_x, 1, 1);
     pass.End();
     wgpu::CommandBuffer commands = encoder.Finish();
+    if (submit_imm) {
+        // Submit immediately
+        ctx->queue.Submit(1, &commands);
+        ctx->queue.OnSubmittedWorkDone(
+            wgpu::CallbackMode::AllowSpontaneous,
+            [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) {
+                if (status != wgpu::QueueWorkDoneStatus::Success) {
+                    GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data);
+                }
+                ctx->param_buf_pool.free_bufs(params_bufs);
+            });
+    } else {
+        // Enqueue commands and only submit if we have enough staged commands
+        std::lock_guard lock(ctx->submit_mutex);
+        ctx->staged_command_bufs.push_back(commands);
+        ctx->staged_param_bufs.push_back(params_bufs);
+        if (ctx->staged_command_bufs.size() == WEBGPU_COMMAND_SUBMIT_BATCH_SIZE) {
+            ggml_backend_webgpu_submit_queue(ctx);
+        }
+    }
+}

-    ctx->queue.Submit(1, &commands);
+static void ggml_backend_webgpu_buffer_memset(webgpu_context& ctx, wgpu::Buffer& buf, uint32_t value, size_t offset, size_t size) {
+    std::vector<uint32_t> params = {(uint32_t)offset, (uint32_t)size, value};
+    std::vector<wgpu::BindGroupEntry> entries = {{ .binding = 0, .buffer = buf, .offset = 0, .size = buf.GetSize() }};
+    size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread;
+    uint32_t wg_x = ((size + 3) + bytes_per_wg - 1) / bytes_per_wg;
+    ggml_backend_webgpu_build_and_enqueue(ctx, ctx->memset_pipeline, params, entries, wg_x, true);
 }

-static void ggml_backend_webgpu_wait_on_submission(webgpu_context ctx) {
-    // Wait for the queue to finish processing all commands
-    ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::WaitAnyOnly,
-        [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) {
-            if (status != wgpu::QueueWorkDoneStatus::Success) {
-                GGML_LOG_ERROR("ggml_webgpu: Failed to wait on queue: %s\n", message.data);
-            }
-        }),
-        UINT64_MAX
-    );
+static size_t ggml_backend_webgpu_tensor_offset(const ggml_tensor* tensor) {
+    return webgpu_tensor_offset(tensor) + tensor->view_offs;
+}
+
+static wgpu::Buffer ggml_backend_webgpu_tensor_buf(const ggml_tensor* tensor) {
+    ggml_backend_webgpu_buffer_context* ctx = (ggml_backend_webgpu_buffer_context*)tensor->buffer->context;
+    return ctx->buffer;
 }

 /** End WebGPU Actions */
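// Batching in numbers (an illustrative sketch of the path above): with
// WEBGPU_COMMAND_SUBMIT_BATCH_SIZE = 16, a graph of 40 eligible nodes yields
// two full submissions from inside ggml_backend_webgpu_build_and_enqueue
// (after nodes 16 and 32) plus one flush of the remaining 8 command buffers
// from ggml_backend_webgpu_graph_compute. Each staged command buffer pins one
// host/device parameter pair until its OnSubmittedWorkDone callback fires, so
// at most WEBGPU_NUM_PARAM_BUFS (100) dispatches can be in flight before
// alloc_bufs() blocks.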
 /** GGML Backend Interface */

-static const char * ggml_backend_webgpu_name(ggml_backend_t backend) {
-    ggml_backend_webgpu_context * ctx = (ggml_backend_webgpu_context *)backend->context;
+static const char* ggml_backend_webgpu_name(ggml_backend_t backend) {
+    ggml_backend_webgpu_context* ctx = (ggml_backend_webgpu_context*)backend->context;
     return ctx->name.c_str();
 }

 static void ggml_backend_webgpu_free(ggml_backend_t backend) {
-    ggml_backend_webgpu_context * ctx = (ggml_backend_webgpu_context *)backend->context;
+    ggml_backend_webgpu_context* ctx = (ggml_backend_webgpu_context*)backend->context;
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_free(" << ctx->name << ")");

     // TODO: cleanup
     GGML_UNUSED(ctx);
 }

+static void ggml_webgpu_cpy(webgpu_context& ctx, ggml_tensor* src, ggml_tensor* dst) {
+    size_t src_offset = ggml_backend_webgpu_tensor_offset(src);
+    // assumes power of 2 offset alignment
+    size_t src_misalignment = src_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1);
+    // align to minimum offset alignment
+    src_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1);
+    size_t dst_offset = ggml_backend_webgpu_tensor_offset(dst);
+    size_t dst_misalignment = dst_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1);
+    dst_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1);
+    uint32_t ne = (uint32_t)ggml_nelements(dst);
+    std::vector<uint32_t> params = {
+        ne, (uint32_t)(src_misalignment / ggml_type_size(src->type)), (uint32_t)(dst_misalignment / ggml_type_size(dst->type)),
+        // Convert byte-strides to element-strides
+        (uint32_t)(src->nb[0] / ggml_type_size(src->type)), (uint32_t)(src->nb[1] / ggml_type_size(src->type)),
+        (uint32_t)(src->nb[2] / ggml_type_size(src->type)), (uint32_t)(src->nb[3] / ggml_type_size(src->type)),
+        (uint32_t)(dst->nb[0] / ggml_type_size(dst->type)), (uint32_t)(dst->nb[1] / ggml_type_size(dst->type)),
+        (uint32_t)(dst->nb[2] / ggml_type_size(dst->type)), (uint32_t)(dst->nb[3] / ggml_type_size(dst->type)),
+        // Logical shape — same for both tensors even if permuted
+        (uint32_t)src->ne[0], (uint32_t)src->ne[1], (uint32_t)src->ne[2], (uint32_t)src->ne[3]
+    };
+
+    std::vector<wgpu::BindGroupEntry> entries = {
+        { .binding = 0, .buffer = ggml_backend_webgpu_tensor_buf(src), .offset = src_offset, .size = (ggml_nbytes(src) + src_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) },
+        { .binding = 1, .buffer = ggml_backend_webgpu_tensor_buf(dst), .offset = dst_offset, .size = (ggml_nbytes(dst) + dst_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) }
+    };
+
+    size_t max_wg_size = ctx->limits.maxComputeWorkgroupSizeX;
+    uint32_t wg_x = (ne + max_wg_size - 1) / max_wg_size;
+    ggml_backend_webgpu_build_and_enqueue(ctx, ctx->cpy_pipeline, params, entries, wg_x);
+}
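// Worked example of the alignment fix-up above (illustrative numbers): with
// minStorageBufferOffsetAlignment = 256 and a view at byte offset 772 into an
// F32 tensor, src_misalignment = 772 & 255 = 4, the binding offset is rounded
// down to 772 & ~255 = 768, and the shader skips 4 / ggml_type_size(F32) = 1
// element via the misalignment value passed in params.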
+static void ggml_webgpu_mul_mat(webgpu_context& ctx, ggml_tensor* src0, ggml_tensor* src1, ggml_tensor* dst) {
+    std::vector<uint32_t> params = {
+        (uint32_t)dst->ne[1],  // number of rows in result (M)
+        (uint32_t)dst->ne[0],  // number of columns in result (N)
+        (uint32_t)src0->ne[0], // number of columns in src0/src1 (K)
+        (uint32_t)(src0->nb[1] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 1
+        (uint32_t)(src1->nb[1] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 1
+        (uint32_t)(src0->nb[2] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 2
+        (uint32_t)(src1->nb[2] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 2
+        (uint32_t)(src0->nb[3] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 3
+        (uint32_t)(src1->nb[3] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 3
+        (uint32_t)src0->ne[2], // batch size in dimension 2
+        (uint32_t)src0->ne[3], // batch size in dimension 3
+        (uint32_t)(src1->ne[2] / src0->ne[2]), // broadcast in dimension 2
+        (uint32_t)(src1->ne[3] / src0->ne[3])  // broadcast in dimension 3
+    };
+
+    std::vector<wgpu::BindGroupEntry> entries = {
+        { .binding = 0, .buffer = ggml_backend_webgpu_tensor_buf(src0), .offset = ggml_backend_webgpu_tensor_offset(src0), .size = ggml_nbytes(src0) },
+        { .binding = 1, .buffer = ggml_backend_webgpu_tensor_buf(src1), .offset = ggml_backend_webgpu_tensor_offset(src1), .size = ggml_nbytes(src1) },
+        { .binding = 2, .buffer = ggml_backend_webgpu_tensor_buf(dst), .offset = ggml_backend_webgpu_tensor_offset(dst), .size = ggml_nbytes(dst) }
+    };
+
+    uint32_t wg_x = (dst->ne[0] * dst->ne[1] * dst->ne[2] * dst->ne[3] + WEBGPU_MUL_MAT_WG_SIZE - 1) / WEBGPU_MUL_MAT_WG_SIZE;
+    ggml_backend_webgpu_build_and_enqueue(ctx, ctx->mul_mat_pipeline, params, entries, wg_x);
+}
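// Broadcast parameters, concretely (illustrative shapes): for
// src0->ne = [K, M, 4, 1] and src1->ne = [K, N, 8, 1], the params carry batch
// sizes 4 and 1 and broadcast ratios src1->ne[2] / src0->ne[2] = 2 and
// src1->ne[3] / src0->ne[3] = 1, i.e. each src0 matrix along dimension 2 is
// reused for two src1 matrices, matching ggml's mul_mat broadcasting rules.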
 // Returns true if node has enqueued work into the queue, false otherwise
-static bool ggml_webgpu_encode_node(webgpu_context ctx, ggml_tensor * node){
+static bool ggml_webgpu_encode_node(webgpu_context ctx, ggml_tensor* node) {
     if (ggml_is_empty(node)) {
         return false;
     }

     WEBGPU_LOG_DEBUG("ggml_webgpu_encode_node(" << node << ", " << ggml_op_name(node->op) << ")");

+    ggml_tensor* src0 = node->src[0];
+    ggml_tensor* src1 = node->src[1];

     switch (node->op) {
         // no-ops
-        case GGML_OP_NONE:
-        case GGML_OP_VIEW:
-        case GGML_OP_PERMUTE:
-            return false;
-
-        case GGML_OP_CPY: {
-            std::lock_guard lock(ctx->mutex);
-            const ggml_tensor * src = node->src[0];
-            ggml_backend_webgpu_buffer_context * src_ctx = (ggml_backend_webgpu_buffer_context *) src->buffer->context;
-            size_t src_offset = webgpu_tensor_offset(src) + src->view_offs;
-            // assumes power of 2 offset alignment
-            size_t src_misalignment = src_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1);
-            // align to minimum offset alignment
-            src_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1);
-            ggml_backend_webgpu_buffer_context * dst_ctx = (ggml_backend_webgpu_buffer_context *) node->buffer->context;
-            size_t dst_offset = webgpu_tensor_offset(node) + node->view_offs;
-            size_t dst_misalignment = dst_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1);
-            dst_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1);
-
-            wgpu::Device device = ctx->device;
-            ggml_backend_webgpu_map_buffer(ctx, ctx->cpy_params_host_buf,
-                wgpu::MapMode::Write, 0, ctx->cpy_params_host_buf.GetSize());
-            uint32_t * params = (uint32_t *) ctx->cpy_params_host_buf.GetMappedRange();
-            uint32_t ne = (uint32_t)ggml_nelements(node);
-            params[0] = ne;
-            params[1] = src_misalignment/ggml_type_size(src->type);
-            params[2] = dst_misalignment/ggml_type_size(node->type);
-
-            // Convert byte-strides to element-strides
-            params[3] = (uint32_t)src->nb[0]/ggml_type_size(src->type);
-            params[4] = (uint32_t)src->nb[1]/ggml_type_size(src->type);
-            params[5] = (uint32_t)src->nb[2]/ggml_type_size(src->type);
-            params[6] = (uint32_t)src->nb[3]/ggml_type_size(src->type);
-            params[7] = (uint32_t)node->nb[0]/ggml_type_size(node->type);
-            params[8] = (uint32_t)node->nb[1]/ggml_type_size(node->type);
-            params[9] = (uint32_t)node->nb[2]/ggml_type_size(node->type);
-            params[10] = (uint32_t)node->nb[3]/ggml_type_size(node->type);
-            // Logical shape — same for both tensors even if permuted
-            params[11] = (uint32_t)(src->ne[0]);
-            params[12] = (uint32_t)(src->ne[1]);
-            params[13] = (uint32_t)(src->ne[2]);
-            params[14] = (uint32_t)(src->ne[3]);
-
-            ctx->cpy_params_host_buf.Unmap();
-
-            wgpu::BindGroupEntry entries[3];
-            entries[0].binding = 0;
-            entries[0].buffer = src_ctx->buffer;
-            entries[0].offset = src_offset;
-            entries[0].size = (ggml_nbytes(src) + src_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1);
-
-            entries[1].binding = 1;
-            entries[1].buffer = dst_ctx->buffer;
-            entries[1].offset = dst_offset;
-            entries[1].size = (ggml_nbytes(node) + dst_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1);
-
-            entries[2].binding = 2;
-            entries[2].buffer = ctx->cpy_params_dev_buf;
-            entries[2].offset = 0;
-            entries[2].size = ctx->cpy_params_dev_buf.GetSize();
-
-            wgpu::BindGroupDescriptor bind_group_desc;
-            bind_group_desc.layout = ctx->cpy_pipeline.GetBindGroupLayout(0);
-            bind_group_desc.label = "ggml_op_cpy";
-            bind_group_desc.entryCount = 3;
-            bind_group_desc.entries = entries;
-            wgpu::BindGroup bind_group = device.CreateBindGroup(&bind_group_desc);
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(
-                ctx->cpy_params_host_buf, 0,
-                ctx->cpy_params_dev_buf, 0,
-                ctx->cpy_params_dev_buf.GetSize()
-            );
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(ctx->cpy_pipeline);
-            pass.SetBindGroup(0, bind_group);
-            size_t max_wg_size = ctx->limits.maxComputeWorkgroupSizeX;
-            pass.DispatchWorkgroups((ne + max_wg_size - 1) / max_wg_size);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            // TODO, don't submit here, batch submissions
-            ctx->queue.Submit(1, &commands);
-            // TODO, don't wait on submission here
-            ggml_backend_webgpu_wait_on_submission(ctx);
-            return true;
-        }
-
-        case GGML_OP_MUL_MAT:
-        {
-            const ggml_tensor * src0 = node->src[0];
-            ggml_backend_webgpu_buffer_context * src0_ctx = (ggml_backend_webgpu_buffer_context *) src0->buffer->context;
-            size_t src0_offset = webgpu_tensor_offset(src0) + src0->view_offs;
-            const ggml_tensor * src1 = node->src[1];
-            ggml_backend_webgpu_buffer_context * src1_ctx = (ggml_backend_webgpu_buffer_context *) src1->buffer->context;
-            size_t src1_offset = webgpu_tensor_offset(src1) + src1->view_offs;
-            ggml_backend_webgpu_buffer_context * dst_ctx = (ggml_backend_webgpu_buffer_context *) node->buffer->context;
-
-            size_t dst_offset = webgpu_tensor_offset(node) + node->view_offs;
-
-            wgpu::Device device = ctx->device;
-
-            // map the host parameters buffer
-            ggml_backend_webgpu_map_buffer(ctx, ctx->mul_mat_params_host_buf,
-                wgpu::MapMode::Write, 0, ctx->mul_mat_params_host_buf.GetSize());
-            uint32_t * params = (uint32_t *) ctx->mul_mat_params_host_buf.GetMappedRange();
-
-            params[0] = (uint32_t)node->ne[1]; // number of rows in result (M)
-            params[1] = (uint32_t)node->ne[0]; // number of columns in result (N)
-            params[2] = (uint32_t)src0->ne[0]; // number of columns in src0/src1 (K)
-
-            params[3] = (uint32_t)src0->nb[1]/ggml_type_size(src0->type); // stride (elements) of src0 in dimension 1
-            params[4] = (uint32_t)src1->nb[1]/ggml_type_size(src1->type); // stride (elements) of src1 in dimension 1
-            params[5] = (uint32_t)src0->nb[2]/ggml_type_size(src0->type); // stride (elements) of src0 in dimension 2
-            params[6] = (uint32_t)src1->nb[2]/ggml_type_size(src1->type); // stride (elements) of src1 in dimension 2
-            params[7] = (uint32_t)src0->nb[3]/ggml_type_size(src0->type); // stride (elements) of src0 in dimension 3
-            params[8] = (uint32_t)src1->nb[3]/ggml_type_size(src1->type); // stride (elements) of src1 in dimension 3
-
-            params[9] = (uint32_t)src0->ne[2]; // batch size in dimension 2
-            params[10] = (uint32_t)src0->ne[3]; // batch size in dimension 3
-            params[11] = (uint32_t)(src1->ne[2]/src0->ne[2]); // broadcast in dimension 2
-            params[12] = (uint32_t)(src1->ne[3]/src0->ne[3]); // broadcast in dimension 3
-
-            ctx->mul_mat_params_host_buf.Unmap();
-
-            wgpu::BindGroupEntry entries[4];
-            entries[0].binding = 0;
-            entries[0].buffer = src0_ctx->buffer;
-            entries[0].offset = src0_offset;
-            entries[0].size = ggml_nbytes(src0);
-
-            entries[1].binding = 1;
-            entries[1].buffer = src1_ctx->buffer;
-            entries[1].offset = src1_offset;
-            entries[1].size = ggml_nbytes(src1);
-
-            entries[2].binding = 2;
-            entries[2].buffer = dst_ctx->buffer;
-            entries[2].offset = dst_offset;
-            entries[2].size = ggml_nbytes(node);
-
-            entries[3].binding = 3;
-            entries[3].buffer = ctx->mul_mat_params_dev_buf;
-            entries[3].offset = 0;
-            entries[3].size = ctx->mul_mat_params_dev_buf.GetSize();
-
-            wgpu::BindGroupDescriptor bind_group_desc;
-            bind_group_desc.layout = ctx->mul_mat_pipeline.GetBindGroupLayout(0);
-            bind_group_desc.entryCount = 4;
-            bind_group_desc.label = "ggml_op_mul_mat";
-            bind_group_desc.entries = entries;
-            wgpu::BindGroup bind_group = device.CreateBindGroup(&bind_group_desc);
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(
-                ctx->mul_mat_params_host_buf, 0,
-                ctx->mul_mat_params_dev_buf, 0,
-                ctx->mul_mat_params_dev_buf.GetSize()
-            );
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(ctx->mul_mat_pipeline);
-            pass.SetBindGroup(0, bind_group);
-            pass.DispatchWorkgroups((node->ne[0] * node->ne[1] * node->ne[2] * node->ne[3] + WEBGPU_MUL_MAT_WG_SIZE - 1) / WEBGPU_MUL_MAT_WG_SIZE);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            // TODO, don't submit here, batch submissions
-            ctx->queue.Submit(1, &commands);
-            // TODO, don't wait on submission here
-            ggml_backend_webgpu_wait_on_submission(ctx);
-            return true;
-        }
-
-        default:
-            return false;
+        case GGML_OP_NONE:
+        case GGML_OP_VIEW:
+        case GGML_OP_PERMUTE:
+            return false;
+        case GGML_OP_CPY: {
+            ggml_webgpu_cpy(ctx, src0, node);
+            break;
+        }
+        case GGML_OP_MUL_MAT: {
+            ggml_webgpu_mul_mat(ctx, src0, src1, node);
+            break;
+        }
+        default:
+            return false;
     }
+    return true;
 }

-static ggml_status ggml_backend_webgpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+static ggml_status ggml_backend_webgpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph* cgraph) {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_graph_compute(" << cgraph->n_nodes << " nodes)");

-    ggml_backend_webgpu_context * backend_ctx = static_cast<ggml_backend_webgpu_context *>(backend->context);
+    ggml_backend_webgpu_context* backend_ctx = static_cast<ggml_backend_webgpu_context*>(backend->context);
     webgpu_context ctx = backend_ctx->webgpu_ctx;

     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_webgpu_encode_node(ctx, cgraph->nodes[i]);
     }

+    ggml_backend_webgpu_submit_queue(ctx);
+    ggml_backend_webgpu_wait_on_submission(ctx);
+
     return GGML_STATUS_SUCCESS;
 }

@@ -455,17 +459,17 @@ static ggml_backend_i ggml_backend_webgpu_i = {

 static void ggml_backend_webgpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_free_buffer()");
-    ggml_backend_webgpu_buffer_context * ctx = static_cast<ggml_backend_webgpu_buffer_context *>(buffer->context);
+    ggml_backend_webgpu_buffer_context* ctx = static_cast<ggml_backend_webgpu_buffer_context*>(buffer->context);
     ctx->buffer.Destroy();
 }

 // Returns the "fake" base pointer.
-static void * ggml_backend_webgpu_buffer_get_base(ggml_backend_buffer_t buffer) {
+static void* ggml_backend_webgpu_buffer_get_base(ggml_backend_buffer_t buffer) {
     GGML_UNUSED(buffer);
     return webgpu_ptr_base;
 }

-static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor, uint8_t value, size_t offset, size_t size) {
     if (size == 0) {
         WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor: size is zero, nothing to do.");
         return;
@@ -473,21 +477,21 @@ static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffe

     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " << offset << ", " << size << ")");

-    ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context;
+    ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context;
     size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset;
     // This is a trick to set all bytes of a u32 to the same 1 byte value.
     uint32_t val32 = (uint32_t)value * 0x01010101;
     ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, val32, total_offset, size);
 }

-static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor, const void* data, size_t offset, size_t size) {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");
-    ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context;
+    ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context;
     webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx;

     size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset;

-    webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size/4)*4);
+    webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size / 4) * 4);

     if (size % 4 != 0) {
         // If size is not a multiple of 4, we need to memset the remaining bytes
@@ -495,17 +499,17 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer,
         // pack the remaining bytes into a uint32_t
         uint32_t val32 = 0;
         for (size_t i = 0; i < remaining_size; i++) {
-            ((uint8_t *)&val32)[i] = ((const uint8_t *)data)[size - remaining_size + i];
+            ((uint8_t*)&val32)[i] = ((const uint8_t*)data)[size - remaining_size + i];
         }
         // memset the remaining bytes
         ggml_backend_webgpu_buffer_memset(webgpu_ctx, buf_ctx->buffer, val32, total_offset + (size - remaining_size), remaining_size);
     }
 }
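// The splat trick and the tail write, by example: value = 0xAB gives
// val32 = 0xAB * 0x01010101 = 0xABABABAB, so one u32 store sets four bytes at
// once. For a 10-byte set_tensor, WriteBuffer covers (10 / 4) * 4 = 8 bytes,
// and the remaining 2 bytes are packed into a u32 and written by the memset
// shader.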
-static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data, size_t offset, size_t size) {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")");

-    ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context;
+    ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context;
     webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx;

     wgpu::Device device = webgpu_ctx->device;
@@ -517,7 +521,7 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer,
         final_size = size + (4 - (size % 4));
     }

-    std::lock_guard lock(webgpu_ctx->mutex);
+    std::lock_guard lock(webgpu_ctx->get_tensor_mutex);

     if (webgpu_ctx->get_tensor_staging_buf == nullptr ||
         webgpu_ctx->get_tensor_staging_buf.GetSize() < final_size) {
@@ -539,7 +543,7 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer,
     // Map the staging buffer to read the data
     ggml_backend_webgpu_map_buffer(webgpu_ctx, webgpu_ctx->get_tensor_staging_buf, wgpu::MapMode::Read, 0, final_size);
     // Must specify size here since the staging buffer might be larger than the tensor size
-    const void * mapped_range = webgpu_ctx->get_tensor_staging_buf.GetConstMappedRange(0, final_size);
+    const void* mapped_range = webgpu_ctx->get_tensor_staging_buf.GetConstMappedRange(0, final_size);

     // Copy the data from the mapped range to the output buffer
     std::memcpy(data, mapped_range, size);
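// Rounding in get_tensor, by example: reading size = 6 bytes gives
// final_size = 6 + (4 - 6 % 4) = 8, since buffer-to-buffer copies must be a
// multiple of 4 bytes; the staging buffer is mapped for 8 bytes but only
// size = 6 bytes are memcpy'd back to the caller.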
 static void ggml_backend_webgpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
-    WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_clear(" << buffer << ", " << (uint32_t) value << ")");
+    WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_clear(" << buffer << ", " << (uint32_t)value << ")");

-    ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context;
+    ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context;
     ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, value, 0, buffer->size);
 }

@@ -569,32 +573,32 @@ static ggml_backend_buffer_i ggml_backend_webgpu_buffer_interface = {

 /* GGML Backend Buffer Type Interface */

-static const char * ggml_backend_webgpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(buft->device->context);
+static const char* ggml_backend_webgpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(buft->device->context);
     return ctx->device_name.c_str();
 }

 static ggml_backend_buffer_t ggml_backend_webgpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_type_alloc_buffer(" << size << ")");

-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(buft->device->context);
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(buft->device->context);
     wgpu::Buffer buf;
     ggml_webgpu_create_buffer(ctx->webgpu_ctx->device, buf, size,
         wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst,
         "allocated_buffer");

-    ggml_backend_webgpu_buffer_context * buf_ctx = new ggml_backend_webgpu_buffer_context(ctx->webgpu_ctx, buf);
+    ggml_backend_webgpu_buffer_context* buf_ctx = new ggml_backend_webgpu_buffer_context(ctx->webgpu_ctx, buf);

     return ggml_backend_buffer_init(buft, ggml_backend_webgpu_buffer_interface, buf_ctx, size);
 }

 static size_t ggml_backend_webgpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(buft->device->context);
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(buft->device->context);
     return ctx->webgpu_ctx->limits.minStorageBufferOffsetAlignment;
 }

 // maxBufferSize might be larger, but you can't bind more than maxStorageBufferBindingSize to a single binding.
 static size_t ggml_backend_webgpu_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(buft->device->context);
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(buft->device->context);
     return ctx->webgpu_ctx->limits.maxStorageBufferBindingSize;
 }

@@ -602,18 +606,18 @@ static size_t ggml_backend_webgpu_buffer_type_get_max_size(ggml_backend_buffer_t

 /* GGML Backend Device Interface */

-static const char * ggml_backend_webgpu_device_get_name(ggml_backend_dev_t dev) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(dev->context);
+static const char* ggml_backend_webgpu_device_get_name(ggml_backend_dev_t dev) {
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(dev->context);
     return ctx->device_name.c_str();
 }

-static const char * ggml_backend_webgpu_device_get_description(ggml_backend_dev_t dev) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(dev->context);
+static const char* ggml_backend_webgpu_device_get_description(ggml_backend_dev_t dev) {
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(dev->context);
     return ctx->device_desc.c_str();
 }

-static void ggml_backend_webgpu_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
-    ggml_backend_webgpu_device_context * ctx = static_cast<ggml_backend_webgpu_device_context *>(dev->context);
+static void ggml_backend_webgpu_device_get_memory(ggml_backend_dev_t dev, size_t* free, size_t* total) {
+    ggml_backend_webgpu_device_context* ctx = static_cast<ggml_backend_webgpu_device_context*>(dev->context);
     // TODO: what do we actually want to return here? maxBufferSize might not be the full available memory.
     *free = ctx->webgpu_ctx->limits.maxBufferSize;
     *total = ctx->webgpu_ctx->limits.maxBufferSize;
@@ -624,10 +628,10 @@ static enum ggml_backend_dev_type ggml_backend_webgpu_device_get_type(ggml_backe
     return GGML_BACKEND_DEVICE_TYPE_GPU;
 }

-static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
-    props->name = ggml_backend_webgpu_device_get_name(dev);
+static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props* props) {
+    props->name        = ggml_backend_webgpu_device_get_name(dev);
     props->description = ggml_backend_webgpu_device_get_description(dev);
-    props->type = ggml_backend_webgpu_device_get_type(dev);
+    props->type        = ggml_backend_webgpu_device_get_type(dev);
     ggml_backend_webgpu_device_get_memory(dev, &props->memory_free, &props->memory_total);
     props->caps = {
         /* .async = */ false,
@@ -638,11 +642,11 @@ static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct
 }

 static ggml_guid_t ggml_backend_webgpu_guid(void) {
-    static const char * guid_str = "__ggml_webgpu :)";
-    return reinterpret_cast<ggml_guid_t>((void *)guid_str);
+    static const char* guid_str = "__ggml_webgpu :)";
+    return reinterpret_cast<ggml_guid_t>((void*)guid_str);
 }

-static void ggml_webgpu_init_memset_pipeline(webgpu_context webgpu_ctx) {
+static void ggml_webgpu_init_memset_pipeline(webgpu_context& webgpu_ctx) {
     // we use the maximum workgroup size for the memset pipeline
     size_t max_wg_size = webgpu_ctx->limits.maxComputeWorkgroupSizeX;
     size_t max_threads = max_wg_size * webgpu_ctx->limits.maxComputeWorkgroupsPerDimension;
@@ -654,45 +658,30 @@ static void ggml_webgpu_init_memset_pipeline(webgpu_context webgpu_ctx) {
     constants[1].key = "bytes_per_thread";
     constants[1].value = webgpu_ctx->memset_bytes_per_thread;
     ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->memset_pipeline, wgsl_memset, "memset", constants);
-    ggml_webgpu_create_buffer(webgpu_ctx->device, webgpu_ctx->memset_params_dev_buf,
-        3 * sizeof(uint32_t), // 3 parameters: buffer size, offset, value
-        wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst, "memset_params_dev_buf");
-    ggml_webgpu_create_buffer(webgpu_ctx->device, webgpu_ctx->memset_params_host_buf,
-        3 * sizeof(uint32_t), wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, "memset_params_host_buf");
 }
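// Dispatch sizing for the memset shader, concretely (illustrative values):
// with maxComputeWorkgroupSizeX = 256 and memset_bytes_per_thread = 4, each
// workgroup covers bytes_per_wg = 1024 bytes, so clearing size = 4000 bytes
// dispatches wg_x = ((4000 + 3) + 1023) / 1024 = 4 workgroups; the "+ 3"
// rounds the byte count up to whole 4-byte words, since the shader writes u32s.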
"cpy_params_dev_buf"); - ggml_webgpu_create_buffer(webgpu_ctx->device, webgpu_ctx->cpy_params_host_buf, WEBGPU_CPY_PARAMS_SIZE, - wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, "cpy_params_host_buf"); } -// TODO: Make thread safe if multiple devices are used -static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, const char * params) { +static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, const char* params) { GGML_UNUSED(params); WEBGPU_LOG_DEBUG("ggml_backend_webgpu_device_init()"); - ggml_backend_webgpu_device_context * dev_ctx = static_cast(dev->context); + ggml_backend_webgpu_device_context* dev_ctx = static_cast(dev->context); webgpu_context webgpu_ctx = dev_ctx->webgpu_ctx; - std::lock_guard lock(webgpu_ctx->mutex); - - if (!webgpu_ctx->device_initialized) { + // Multiple threads may try to initialize the device + std::lock_guard lock(webgpu_ctx->init_mutex); + if (!webgpu_ctx->device_init) { // Initialize device wgpu::DeviceDescriptor dev_desc; dev_desc.requiredLimits = &webgpu_ctx->limits; @@ -702,19 +691,19 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co [](const wgpu::Device& device, wgpu::DeviceLostReason reason, wgpu::StringView message) { GGML_UNUSED(device); GGML_LOG_ERROR("ggml_webgpu: Device lost! Reason: %d, Message: %s\n", static_cast(reason), message.data); - }); + }); dev_desc.SetUncapturedErrorCallback( [](const wgpu::Device& device, wgpu::ErrorType reason, wgpu::StringView message) { GGML_UNUSED(device); GGML_LOG_ERROR("ggml_webgpu: Device error! Reason: %d, Message: %s\n", static_cast(reason), message.data); - }); - webgpu_ctx->instance.WaitAny(webgpu_ctx->adapter.RequestDevice(&dev_desc, wgpu::CallbackMode::WaitAnyOnly, + }); + webgpu_ctx->instance.WaitAny(webgpu_ctx->adapter.RequestDevice(&dev_desc, wgpu::CallbackMode::AllowSpontaneous, [webgpu_ctx](wgpu::RequestDeviceStatus status, wgpu::Device device, wgpu::StringView message) { if (status != wgpu::RequestDeviceStatus::Success) { GGML_LOG_ERROR("ggml_webgpu: Failed to get a device: %s\n", message.data); return; } - webgpu_ctx->device = device; + webgpu_ctx->device = std::move(device); }), UINT64_MAX ); @@ -723,10 +712,13 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co // Initialize (compute) queue webgpu_ctx->queue = webgpu_ctx->device.GetQueue(); + // Create buffer pool for shader parameters + webgpu_ctx->param_buf_pool.init(webgpu_ctx->device); + ggml_webgpu_init_memset_pipeline(webgpu_ctx); ggml_webgpu_init_mul_mat_pipeline(webgpu_ctx); ggml_webgpu_init_cpy_pipeline(webgpu_ctx); - webgpu_ctx->device_initialized = true; + webgpu_ctx->device_init = true; } static ggml_backend_webgpu_context backend_ctx; @@ -767,20 +759,20 @@ static bool ggml_backend_webgpu_device_supports_buft(ggml_backend_dev_t dev, ggm return buft->iface.get_name == ggml_backend_webgpu_buffer_type_get_name; } -static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { +static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor* op) { GGML_UNUSED(dev); switch (op->op) { - case GGML_OP_NONE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - return true; - case GGML_OP_CPY: - return op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32; - case GGML_OP_MUL_MAT: - return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; - default: - return false; + case GGML_OP_NONE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + 
+            return true;
+        case GGML_OP_CPY:
+            return op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32;
+        case GGML_OP_MUL_MAT:
+            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
+        default:
+            return false;
     }
 }

@@ -806,13 +798,13 @@ static struct ggml_backend_device_i ggml_backend_webgpu_device_i = {

 /* GGML Backend Registration Interface */

-static const char * ggml_backend_webgpu_reg_get_name(ggml_backend_reg_t reg) {
-    ggml_backend_webgpu_reg_context * ctx = static_cast<ggml_backend_webgpu_reg_context *>(reg->context);
+static const char* ggml_backend_webgpu_reg_get_name(ggml_backend_reg_t reg) {
+    ggml_backend_webgpu_reg_context* ctx = static_cast<ggml_backend_webgpu_reg_context*>(reg->context);
     return ctx->name;
 }

 static size_t ggml_backend_webgpu_reg_get_device_count(ggml_backend_reg_t reg) {
-    ggml_backend_webgpu_reg_context * ctx = static_cast<ggml_backend_webgpu_reg_context *>(reg->context);
+    ggml_backend_webgpu_reg_context* ctx = static_cast<ggml_backend_webgpu_reg_context*>(reg->context);
     return ctx->device_count;
 }

@@ -822,20 +814,20 @@

     GGML_ASSERT(index == 0);
     WEBGPU_LOG_DEBUG("ggml_backend_reg_get_device()");

-    ggml_backend_webgpu_reg_context * reg_ctx = static_cast<ggml_backend_webgpu_reg_context *>(reg->context);
+    ggml_backend_webgpu_reg_context* reg_ctx = static_cast<ggml_backend_webgpu_reg_context*>(reg->context);

     webgpu_context ctx = reg_ctx->webgpu_ctx;

     wgpu::RequestAdapterOptions options = {};
-    auto callback = [](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, const char *message, void *userdata) {
+    auto callback = [](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, const char* message, void* userdata) {
         if (status != wgpu::RequestAdapterStatus::Success) {
             GGML_LOG_ERROR("ggml_webgpu: Failed to get an adapter: %s\n", message);
             return;
         }
-        *static_cast<wgpu::Adapter *>(userdata) = adapter;
-    };
-    void *userdata = &ctx->adapter;
-    ctx->instance.WaitAny(ctx->instance.RequestAdapter(&options, wgpu::CallbackMode::WaitAnyOnly, callback, userdata), UINT64_MAX);
+        *static_cast<wgpu::Adapter*>(userdata) = std::move(adapter);
+    };
+    void* userdata = &ctx->adapter;
+    ctx->instance.WaitAny(ctx->instance.RequestAdapter(&options, wgpu::CallbackMode::AllowSpontaneous, callback, userdata), UINT64_MAX);
     GGML_ASSERT(ctx->adapter != nullptr);

     ctx->adapter.GetLimits(&ctx->limits);
@@ -871,12 +863,10 @@ static const struct ggml_backend_reg_i ggml_backend_webgpu_reg_i = {

 /* End GGML Backend Registration Interface */

-// TODO: Does this need to be thread safe? Is it only called once?
 ggml_backend_reg_t ggml_backend_webgpu_reg() {
     WEBGPU_LOG_DEBUG("ggml_backend_webgpu_reg()");

     webgpu_context webgpu_ctx = std::make_shared<webgpu_context_struct>();
-    webgpu_ctx->device_initialized = false;

     static ggml_backend_webgpu_reg_context ctx;
     ctx.webgpu_ctx = webgpu_ctx;
@@ -884,7 +874,7 @@ ggml_backend_reg_t ggml_backend_webgpu_reg() {
     ctx.device_count = 1;

     wgpu::InstanceDescriptor instance_descriptor{};
-    std::vector<wgpu::InstanceFeatureName> instance_features = {wgpu::InstanceFeatureName::TimedWaitAny};
+    std::vector<wgpu::InstanceFeatureName> instance_features = { wgpu::InstanceFeatureName::TimedWaitAny };
     instance_descriptor.requiredFeatures = instance_features.data();
     instance_descriptor.requiredFeatureCount = instance_features.size();
     webgpu_ctx->instance = wgpu::CreateInstance(&instance_descriptor);

From 04d7b272d65f19e65ec271e6f3442c758f53f3c0 Mon Sep 17 00:00:00 2001
From: Reese Levine
Date: Wed, 30 Jul 2025 13:45:58 -0700
Subject: [PATCH 02/24] Add header for linux builds

---
 ggml/src/ggml-webgpu/ggml-webgpu.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index e35c865ea7c61..a3f41da1b1ca0 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -7,6 +7,7 @@

 #include "ggml-wgsl-shaders.hpp"

+#include
 #include
 #include
 #include

From 01c8ced232fffdd670a3919741cabdc4e9a63b9f Mon Sep 17 00:00:00 2001
From: Reese Levine
Date: Wed, 30 Jul 2025 14:27:29 -0700
Subject: [PATCH 03/24] Free staged parameter buffers at once

---
 ggml/src/ggml-webgpu/ggml-webgpu.cpp | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index a3f41da1b1ca0..dcc32e88ef16d 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -76,10 +76,10 @@ struct webgpu_param_buf_pool {
         return bufs;
     }

-    void free_bufs(const webgpu_param_bufs& bufs) {
+    void free_bufs(std::vector<webgpu_param_bufs> bufs) {
         std::lock_guard lock(mutex);
-        free.push_back(bufs);
-        cv.notify_one();
+        free.insert(free.end(), bufs.begin(), bufs.end());
+        cv.notify_all();
     }

     void cleanup() {
@@ -222,9 +222,7 @@ static void ggml_backend_webgpu_submit_queue(webgpu_context& ctx) {
                 GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data);
             }
             // Free the staged parameter buffers
-            for (const auto& bufs : staged_param_bufs) {
-                ctx->param_buf_pool.free_bufs(bufs);
-            }
+            ctx->param_buf_pool.free_bufs(staged_param_bufs);
         });
 }

@@ -287,7 +285,7 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context& ctx, wgpu::Com
                 if (status != wgpu::QueueWorkDoneStatus::Success) {
                     GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data);
                 }
-                ctx->param_buf_pool.free_bufs(params_bufs);
+                ctx->param_buf_pool.free_bufs({params_bufs});
             });
     } else {

From bfff27f130e818fdf7aade536843c9d1e2aa54b4 Mon Sep 17 00:00:00 2001
From: Reese Levine
Date: Wed, 30 Jul 2025 15:06:09 -0700
Subject: [PATCH 04/24] Format with clang-format

---
 ggml/src/ggml-webgpu/ggml-webgpu.cpp | 666 +++++++++++++++------------
 1 file changed, 381 insertions(+), 285 deletions(-)

diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index dcc32e88ef16d..666bfbe183e41 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -1,48 +1,56 @@
+/*
+    WebGPU backend implementation.
+    Note: Use ClangFormat to format this file.
+*/ -#include +#include "ggml-webgpu.h" -#include "ggml-impl.h" #include "ggml-backend-impl.h" - +#include "ggml-impl.h" #include "ggml-wgsl-shaders.hpp" +#include + #include #include #include #include #ifdef GGML_WEBGPU_DEBUG -#define WEBGPU_LOG_DEBUG(msg) std::cout << msg << std::endl +# define WEBGPU_LOG_DEBUG(msg) std::cout << msg << std::endl #else -#define WEBGPU_LOG_DEBUG(msg) ((void) 0) -#endif // GGML_WEBGPU_DEBUG +# define WEBGPU_LOG_DEBUG(msg) ((void) 0) +#endif // GGML_WEBGPU_DEBUG /* Constants */ #define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 16 -#define WEBGPU_MUL_MAT_WG_SIZE 64 -#define WEBGPU_NUM_PARAM_BUFS 100 -#define WEBGPU_PARAMS_BUF_SIZE_BYTES 256 -#define WEBGPU_STORAGE_BUF_BINDING_MULT 4 // a storage buffer binding size must be a multiple of 4 +#define WEBGPU_MUL_MAT_WG_SIZE 64 +#define WEBGPU_NUM_PARAM_BUFS 100 +#define WEBGPU_PARAMS_BUF_SIZE_BYTES 256 +#define WEBGPU_STORAGE_BUF_BINDING_MULT 4 // a storage buffer binding size must be a multiple of 4 /* End Constants */ // This is a "fake" base pointer, since WebGPU buffers do not have pointers to their locations. -static void* const webgpu_ptr_base = (void*)(uintptr_t)0x1000; // NOLINT +static void * const webgpu_ptr_base = (void *) (uintptr_t) 0x1000; // NOLINT // Always returns the base offset of a tensor, regardless of views. -static uint64_t webgpu_tensor_offset(const ggml_tensor* tensor) { +static uint64_t webgpu_tensor_offset(const ggml_tensor * tensor) { if (tensor->view_src) { - return (uint8_t*)tensor->view_src->data - (uint8_t*)webgpu_ptr_base; + return (uint8_t *) tensor->view_src->data - (uint8_t *) webgpu_ptr_base; } - return (uint8_t*)tensor->data - (uint8_t*)webgpu_ptr_base; + return (uint8_t *) tensor->data - (uint8_t *) webgpu_ptr_base; } /* Struct definitions */ // Forward reference -static void ggml_webgpu_create_buffer(wgpu::Device& device, wgpu::Buffer& buffer, size_t size, wgpu::BufferUsage usage, const char* label); +static void ggml_webgpu_create_buffer(wgpu::Device & device, + wgpu::Buffer & buffer, + size_t size, + wgpu::BufferUsage usage, + const char * label); struct webgpu_param_bufs { wgpu::Buffer host_buf; @@ -53,24 +61,30 @@ struct webgpu_param_bufs { struct webgpu_param_buf_pool { std::vector free; - std::mutex mutex; + std::mutex mutex; std::condition_variable cv; void init(wgpu::Device device) { for (int i = 0; i < WEBGPU_NUM_PARAM_BUFS; i++) { wgpu::Buffer host_buf; wgpu::Buffer dev_buf; - ggml_webgpu_create_buffer(device, host_buf, WEBGPU_PARAMS_BUF_SIZE_BYTES, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite, "ggml_webgpu_host_params_buf"); - ggml_webgpu_create_buffer(device, dev_buf, WEBGPU_PARAMS_BUF_SIZE_BYTES, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, "ggml_webgpu_dev_params_buf"); + ggml_webgpu_create_buffer(device, + host_buf, + WEBGPU_PARAMS_BUF_SIZE_BYTES, + wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite, + "ggml_webgpu_host_params_buf"); + ggml_webgpu_create_buffer(device, + dev_buf, + WEBGPU_PARAMS_BUF_SIZE_BYTES, + wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, + "ggml_webgpu_dev_params_buf"); free.push_back({ host_buf, dev_buf }); } } webgpu_param_bufs alloc_bufs() { std::unique_lock lock(mutex); - cv.wait(lock, [this] { - return !free.empty(); - }); + cv.wait(lock, [this] { return !free.empty(); }); webgpu_param_bufs bufs = free.back(); free.pop_back(); return bufs; @@ -84,7 +98,7 @@ struct webgpu_param_buf_pool { void cleanup() { std::lock_guard lock(mutex); - for (auto& bufs : free) { + for (auto & bufs : free) { 
             bufs.host_buf.Destroy();
             bufs.dev_buf.Destroy();
         }
         free.clear();
     }
 };

 // All the base objects needed to run operations on a WebGPU device
 struct webgpu_context_struct {
-    wgpu::Instance instance;
-    wgpu::Adapter adapter;
-    wgpu::Device device;
-    wgpu::Queue queue;
-    wgpu::Limits limits;
+    wgpu::Instance          instance;
+    wgpu::Adapter           adapter;
+    wgpu::Device            device;
+    wgpu::Queue             queue;
+    wgpu::Limits            limits;
     wgpu::SupportedFeatures features;

     std::recursive_mutex submit_mutex;
-    std::mutex get_tensor_mutex;
-    std::mutex init_mutex;
-    bool device_init = false;
+    std::mutex           get_tensor_mutex;
+    std::mutex           init_mutex;
+    bool                 device_init = false;

     // Parameter buffer pool
     webgpu_param_buf_pool param_buf_pool;

     wgpu::ComputePipeline memset_pipeline;
     wgpu::ComputePipeline mul_mat_pipeline;
     wgpu::ComputePipeline cpy_pipeline;

     size_t memset_bytes_per_thread;

     // Staging buffer for reading data from the GPU
     wgpu::Buffer get_tensor_staging_buf;

     // Command buffers which need to be submitted
     std::vector<wgpu::CommandBuffer> staged_command_bufs;
     // Parameter buffers associated with the staged command buffers
-    std::vector<webgpu_param_bufs> staged_param_bufs;
+    std::vector<webgpu_param_bufs>   staged_param_bufs;
 };

 typedef std::shared_ptr<webgpu_context_struct> webgpu_context;

 struct ggml_backend_webgpu_reg_context {
     webgpu_context webgpu_ctx;

-    size_t device_count;
-    const char* name;
+    size_t         device_count;
+    const char *   name;
 };

 struct ggml_backend_webgpu_device_context {
@@ -98,7 +151,7 @@ struct ggml_backend_webgpu_buffer_context {
     wgpu::Buffer buffer;

     ggml_backend_webgpu_buffer_context(webgpu_context ctx, wgpu::Buffer buf) :
-        webgpu_ctx(std::move(ctx)), buffer(std::move(buf)) {
-    }
+        webgpu_ctx(std::move(ctx)),
+        buffer(std::move(buf)) {}
 };

 /* End struct definitions */

 /* WebGPU object initializations */

-static void ggml_webgpu_create_pipeline(wgpu::Device& device, wgpu::ComputePipeline& pipeline, const char* shader_code, const char* label, const std::vector<wgpu::ConstantEntry>& constants = {}) {
+static void ggml_webgpu_create_pipeline(wgpu::Device &                           device,
+                                        wgpu::ComputePipeline &                  pipeline,
+                                        const char *                             shader_code,
+                                        const char *                             label,
+                                        const std::vector<wgpu::ConstantEntry> & constants = {}) {
     WEBGPU_LOG_DEBUG("ggml_webgpu_create_pipeline()");

     wgpu::ShaderSourceWGSL shader_source;
     shader_source.code = shader_code;

     wgpu::ShaderModuleDescriptor shader_desc;
-    shader_desc.nextInChain = &shader_source;
+    shader_desc.nextInChain        = &shader_source;
     wgpu::ShaderModule shader_module = device.CreateShaderModule(&shader_desc);

     wgpu::ComputePipelineDescriptor pipeline_desc;
-    pipeline_desc.label = label;
-    pipeline_desc.compute.module = shader_module;
-    pipeline_desc.compute.entryPoint = "main"; // Entry point in the WGSL code
-    pipeline_desc.layout = nullptr; // nullptr means auto layout
+    pipeline_desc.label              = label;
+    pipeline_desc.compute.module     = shader_module;
+    pipeline_desc.compute.entryPoint = "main";    // Entry point in the WGSL code
+    pipeline_desc.layout             = nullptr;   // nullptr means auto layout
     if (constants.size() > 0) {
-        pipeline_desc.compute.constants = constants.data();
+        pipeline_desc.compute.constants     = constants.data();
         pipeline_desc.compute.constantCount = constants.size();
     }
     pipeline = device.CreateComputePipeline(&pipeline_desc);
 }

-static void ggml_webgpu_create_buffer(wgpu::Device& device, wgpu::Buffer& buffer, size_t size, wgpu::BufferUsage usage, const char* label) {
+static void ggml_webgpu_create_buffer(wgpu::Device &    device,
+                                      wgpu::Buffer &    buffer,
+                                      size_t            size,
+                                      wgpu::BufferUsage usage,
+                                      const char *      label) {
     WEBGPU_LOG_DEBUG("ggml_webgpu_create_buffer()");

     wgpu::BufferDescriptor buffer_desc;
buffer_desc.size = size; + buffer_desc.usage = usage; + buffer_desc.label = label; buffer_desc.mappedAtCreation = false; // TODO: error handling - buffer = device.CreateBuffer(&buffer_desc); + buffer = device.CreateBuffer(&buffer_desc); } /** End WebGPU object initializations */ /** WebGPU Actions */ -static void ggml_backend_webgpu_wait_on_submission(webgpu_context& ctx) { +static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { // Wait for the queue to finish processing all commands - ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous, - [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { - if (status != wgpu::QueueWorkDoneStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to wait on queue: %s\n", message.data); - } - }), - UINT64_MAX - ); + ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone( + wgpu::CallbackMode::AllowSpontaneous, + [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { + if (status != wgpu::QueueWorkDoneStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to wait on queue: %s\n", message.data); + } + }), + UINT64_MAX); } -static void ggml_backend_webgpu_submit_queue(webgpu_context& ctx) { +static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { std::lock_guard lock(ctx->submit_mutex); ctx->queue.Submit(ctx->staged_command_bufs.size(), ctx->staged_command_bufs.data()); @@ -226,24 +248,34 @@ static void ggml_backend_webgpu_submit_queue(webgpu_context& ctx) { }); } -static void ggml_backend_webgpu_map_buffer(webgpu_context& ctx, wgpu::Buffer& buffer, wgpu::MapMode mode, size_t offset, size_t size) { - ctx->instance.WaitAny(buffer.MapAsync( - mode, offset, size, wgpu::CallbackMode::AllowSpontaneous, - [](wgpu::MapAsyncStatus status, wgpu::StringView message) { - if (status != wgpu::MapAsyncStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to map buffer: %s\n", message.data); - } - }), - UINT64_MAX - ); -} - -static void ggml_backend_webgpu_build_and_enqueue(webgpu_context& ctx, wgpu::ComputePipeline& pipeline, std::vector params, std::vector bind_group_entries, uint32_t wg_x, bool submit_imm = false) { +static void ggml_backend_webgpu_map_buffer(webgpu_context & ctx, + wgpu::Buffer & buffer, + wgpu::MapMode mode, + size_t offset, + size_t size) { + ctx->instance.WaitAny(buffer.MapAsync(mode, + offset, + size, + wgpu::CallbackMode::AllowSpontaneous, + [](wgpu::MapAsyncStatus status, wgpu::StringView message) { + if (status != wgpu::MapAsyncStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to map buffer: %s\n", + message.data); + } + }), + UINT64_MAX); +} + +static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & ctx, + wgpu::ComputePipeline & pipeline, + std::vector params, + std::vector bind_group_entries, + uint32_t wg_x, + bool submit_imm = false) { webgpu_param_bufs params_bufs = ctx->param_buf_pool.alloc_bufs(); - ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf, - wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize()); - uint32_t* _params = (uint32_t*)params_bufs.host_buf.GetMappedRange(); + ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf, wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize()); + uint32_t * _params = (uint32_t *) params_bufs.host_buf.GetMappedRange(); for (size_t i = 0; i < params.size(); i++) { _params[i] = params[i]; }; @@ -251,42 +283,36 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context& ctx, wgpu::Com params_bufs.host_buf.Unmap(); uint32_t params_bufs_binding_num = 
bind_group_entries.size(); - bind_group_entries.push_back({ - .binding = params_bufs_binding_num, - .buffer = params_bufs.dev_buf, - .offset = 0, - .size = params_bufs.dev_buf.GetSize() - }); + bind_group_entries.push_back({ .binding = params_bufs_binding_num, + .buffer = params_bufs.dev_buf, + .offset = 0, + .size = params_bufs.dev_buf.GetSize() }); wgpu::BindGroupDescriptor bind_group_desc; - bind_group_desc.layout = pipeline.GetBindGroupLayout(0); + bind_group_desc.layout = pipeline.GetBindGroupLayout(0); bind_group_desc.entryCount = bind_group_entries.size(); - bind_group_desc.entries = bind_group_entries.data(); + bind_group_desc.entries = bind_group_entries.data(); wgpu::BindGroup bind_group = ctx->device.CreateBindGroup(&bind_group_desc); wgpu::CommandEncoder encoder = ctx->device.CreateCommandEncoder(); - encoder.CopyBufferToBuffer( - params_bufs.host_buf, 0, - params_bufs.dev_buf, 0, - params_bufs.dev_buf.GetSize() - ); + encoder.CopyBufferToBuffer(params_bufs.host_buf, 0, params_bufs.dev_buf, 0, params_bufs.dev_buf.GetSize()); wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); pass.SetPipeline(pipeline); pass.SetBindGroup(0, bind_group); pass.DispatchWorkgroups(wg_x, 1, 1); pass.End(); - wgpu::CommandBuffer commands = encoder.Finish(); + wgpu::CommandBuffer commands = encoder.Finish(); if (submit_imm) { // Submit immediately ctx->queue.Submit(1, &commands); - ctx->queue.OnSubmittedWorkDone( - wgpu::CallbackMode::AllowSpontaneous, - [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { - if (status != wgpu::QueueWorkDoneStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data); - } - ctx->param_buf_pool.free_bufs({params_bufs}); - }); + ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous, + [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { + if (status != wgpu::QueueWorkDoneStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", + message.data); + } + ctx->param_buf_pool.free_bufs({ params_bufs }); + }); } else { // Enqueue commands and only submit if we have enough staged commands std::lock_guard lock(ctx->submit_mutex); @@ -298,20 +324,26 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context& ctx, wgpu::Com } } -static void ggml_backend_webgpu_buffer_memset(webgpu_context& ctx, wgpu::Buffer& buf, uint32_t value, size_t offset, size_t size) { - std::vector params = {(uint32_t)offset, (uint32_t)size, value}; - std::vector entries = {{ .binding = 0, .buffer = buf, .offset = 0, .size = buf.GetSize() }}; - size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread; - uint32_t wg_x = ((size + 3) + bytes_per_wg - 1) / bytes_per_wg; +static void ggml_backend_webgpu_buffer_memset(webgpu_context & ctx, + wgpu::Buffer & buf, + uint32_t value, + size_t offset, + size_t size) { + std::vector params = { (uint32_t) offset, (uint32_t) size, value }; + std::vector entries = { + { .binding = 0, .buffer = buf, .offset = 0, .size = buf.GetSize() } + }; + size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread; + uint32_t wg_x = ((size + 3) + bytes_per_wg - 1) / bytes_per_wg; ggml_backend_webgpu_build_and_enqueue(ctx, ctx->memset_pipeline, params, entries, wg_x, true); } -static size_t ggml_backend_webgpu_tensor_offset(const ggml_tensor* tensor) { +static size_t ggml_backend_webgpu_tensor_offset(const ggml_tensor * tensor) { return webgpu_tensor_offset(tensor) + 
tensor->view_offs; } -static wgpu::Buffer ggml_backend_webgpu_tensor_buf(const ggml_tensor* tensor) { - ggml_backend_webgpu_buffer_context* ctx = (ggml_backend_webgpu_buffer_context*)tensor->buffer->context; +static wgpu::Buffer ggml_backend_webgpu_tensor_buf(const ggml_tensor * tensor) { + ggml_backend_webgpu_buffer_context * ctx = (ggml_backend_webgpu_buffer_context *) tensor->buffer->context; return ctx->buffer; } @@ -319,112 +351,139 @@ static wgpu::Buffer ggml_backend_webgpu_tensor_buf(const ggml_tensor* tensor) { /** GGML Backend Interface */ -static const char* ggml_backend_webgpu_name(ggml_backend_t backend) { - ggml_backend_webgpu_context* ctx = (ggml_backend_webgpu_context*)backend->context; +static const char * ggml_backend_webgpu_name(ggml_backend_t backend) { + ggml_backend_webgpu_context * ctx = (ggml_backend_webgpu_context *) backend->context; return ctx->name.c_str(); } static void ggml_backend_webgpu_free(ggml_backend_t backend) { - ggml_backend_webgpu_context* ctx = (ggml_backend_webgpu_context*)backend->context; + ggml_backend_webgpu_context * ctx = (ggml_backend_webgpu_context *) backend->context; WEBGPU_LOG_DEBUG("ggml_backend_webgpu_free(" << ctx->name << ")"); // TODO: cleanup GGML_UNUSED(ctx); } -static void ggml_webgpu_cpy(webgpu_context& ctx, ggml_tensor* src, ggml_tensor* dst) { - size_t src_offset = ggml_backend_webgpu_tensor_offset(src); +static void ggml_webgpu_cpy(webgpu_context & ctx, ggml_tensor * src, ggml_tensor * dst) { + size_t src_offset = ggml_backend_webgpu_tensor_offset(src); // assumes power of 2 offset alignment size_t src_misalignment = src_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1); // align to minimum offset alignment src_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1); - size_t dst_offset = ggml_backend_webgpu_tensor_offset(dst); + size_t dst_offset = ggml_backend_webgpu_tensor_offset(dst); size_t dst_misalignment = dst_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1); dst_offset &= ~(ctx->limits.minStorageBufferOffsetAlignment - 1); - uint32_t ne = (uint32_t)ggml_nelements(dst); - std::vector params = { - ne, (uint32_t)(src_misalignment / ggml_type_size(src->type)), (uint32_t)(dst_misalignment / ggml_type_size(dst->type)), - // Convert byte-strides to element-strides - (uint32_t)(src->nb[0] / ggml_type_size(src->type)), (uint32_t)(src->nb[1] / ggml_type_size(src->type)), - (uint32_t)(src->nb[2] / ggml_type_size(src->type)), (uint32_t)(src->nb[3] / ggml_type_size(src->type)), - (uint32_t)(dst->nb[0] / ggml_type_size(dst->type)), (uint32_t)(dst->nb[1] / ggml_type_size(dst->type)), - (uint32_t)(dst->nb[2] / ggml_type_size(dst->type)), (uint32_t)(dst->nb[3] / ggml_type_size(dst->type)), - // Logical shape — same for both tensors even if permuted - (uint32_t)src->ne[0], (uint32_t)src->ne[1], (uint32_t)src->ne[2], (uint32_t)src->ne[3] - }; + uint32_t ne = (uint32_t) ggml_nelements(dst); + std::vector params = { ne, + (uint32_t) (src_misalignment / ggml_type_size(src->type)), + (uint32_t) (dst_misalignment / ggml_type_size(dst->type)), + // Convert byte-strides to element-strides + (uint32_t) (src->nb[0] / ggml_type_size(src->type)), + (uint32_t) (src->nb[1] / ggml_type_size(src->type)), + (uint32_t) (src->nb[2] / ggml_type_size(src->type)), + (uint32_t) (src->nb[3] / ggml_type_size(src->type)), + (uint32_t) (dst->nb[0] / ggml_type_size(dst->type)), + (uint32_t) (dst->nb[1] / ggml_type_size(dst->type)), + (uint32_t) (dst->nb[2] / ggml_type_size(dst->type)), + (uint32_t) (dst->nb[3] / ggml_type_size(dst->type)), 
+ // Logical shape — same for both tensors even if permuted + (uint32_t) src->ne[0], + (uint32_t) src->ne[1], + (uint32_t) src->ne[2], + (uint32_t) src->ne[3] }; std::vector entries = { - { .binding = 0, .buffer = ggml_backend_webgpu_tensor_buf(src), .offset = src_offset, .size = (ggml_nbytes(src) + src_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) }, - { .binding = 1, .buffer = ggml_backend_webgpu_tensor_buf(dst), .offset = dst_offset, .size = (ggml_nbytes(dst) + dst_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) } + { .binding = 0, + .buffer = ggml_backend_webgpu_tensor_buf(src), + .offset = src_offset, + .size = (ggml_nbytes(src) + src_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & + ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) }, + { .binding = 1, + .buffer = ggml_backend_webgpu_tensor_buf(dst), + .offset = dst_offset, + .size = (ggml_nbytes(dst) + dst_misalignment + WEBGPU_STORAGE_BUF_BINDING_MULT - 1) & + ~(WEBGPU_STORAGE_BUF_BINDING_MULT - 1) } }; - size_t max_wg_size = ctx->limits.maxComputeWorkgroupSizeX; - uint32_t wg_x = (ne + max_wg_size - 1) / max_wg_size; + size_t max_wg_size = ctx->limits.maxComputeWorkgroupSizeX; + uint32_t wg_x = (ne + max_wg_size - 1) / max_wg_size; ggml_backend_webgpu_build_and_enqueue(ctx, ctx->cpy_pipeline, params, entries, wg_x); } -static void ggml_webgpu_mul_mat(webgpu_context& ctx, ggml_tensor* src0, ggml_tensor* src1, ggml_tensor* dst) { +static void ggml_webgpu_mul_mat(webgpu_context & ctx, ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst) { std::vector params = { - (uint32_t)dst->ne[1], // number of rows in result (M) - (uint32_t)dst->ne[0], // number of columns in result (N) - (uint32_t)src0->ne[0], // number of columns in src0/src1 (K) - (uint32_t)(src0->nb[1] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 1 - (uint32_t)(src1->nb[1] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 1 - (uint32_t)(src0->nb[2] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 2 - (uint32_t)(src1->nb[2] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 2 - (uint32_t)(src0->nb[3] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 3 - (uint32_t)(src1->nb[3] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 3 - (uint32_t)src0->ne[2], // batch size in dimension 2 - (uint32_t)src0->ne[3], // batch size in dimension 3 - (uint32_t)(src1->ne[2] / src0->ne[2]), // broadcast in dimension 2 - (uint32_t)(src1->ne[3] / src0->ne[3]) // broadcast in dimension 3 + (uint32_t) dst->ne[1], // number of rows in result (M) + (uint32_t) dst->ne[0], // number of columns in result (N) + (uint32_t) src0->ne[0], // number of columns in src0/src1 (K) + (uint32_t) (src0->nb[1] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 1 + (uint32_t) (src1->nb[1] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 1 + (uint32_t) (src0->nb[2] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 2 + (uint32_t) (src1->nb[2] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 2 + (uint32_t) (src0->nb[3] / ggml_type_size(src0->type)), // stride (elements) of src0 in dimension 3 + (uint32_t) (src1->nb[3] / ggml_type_size(src1->type)), // stride (elements) of src1 in dimension 3 + (uint32_t) src0->ne[2], // batch size in dimension 2 + (uint32_t) src0->ne[3], // batch size in dimension 
3 + (uint32_t) (src1->ne[2] / src0->ne[2]), // broadcast in dimension 2 + (uint32_t) (src1->ne[3] / src0->ne[3]) // broadcast in dimension 3 }; std::vector entries = { - { .binding = 0, .buffer = ggml_backend_webgpu_tensor_buf(src0), .offset = ggml_backend_webgpu_tensor_offset(src0), .size = ggml_nbytes(src0) }, - { .binding = 1, .buffer = ggml_backend_webgpu_tensor_buf(src1), .offset = ggml_backend_webgpu_tensor_offset(src1), .size = ggml_nbytes(src1) }, - { .binding = 2, .buffer = ggml_backend_webgpu_tensor_buf(dst), .offset = ggml_backend_webgpu_tensor_offset(dst), .size = ggml_nbytes(dst) } + { .binding = 0, + .buffer = ggml_backend_webgpu_tensor_buf(src0), + .offset = ggml_backend_webgpu_tensor_offset(src0), + .size = ggml_nbytes(src0) }, + { .binding = 1, + .buffer = ggml_backend_webgpu_tensor_buf(src1), + .offset = ggml_backend_webgpu_tensor_offset(src1), + .size = ggml_nbytes(src1) }, + { .binding = 2, + .buffer = ggml_backend_webgpu_tensor_buf(dst), + .offset = ggml_backend_webgpu_tensor_offset(dst), + .size = ggml_nbytes(dst) } }; - uint32_t wg_x = (dst->ne[0] * dst->ne[1] * dst->ne[2] * dst->ne[3] + WEBGPU_MUL_MAT_WG_SIZE - 1) / WEBGPU_MUL_MAT_WG_SIZE; + uint32_t wg_x = + (dst->ne[0] * dst->ne[1] * dst->ne[2] * dst->ne[3] + WEBGPU_MUL_MAT_WG_SIZE - 1) / WEBGPU_MUL_MAT_WG_SIZE; ggml_backend_webgpu_build_and_enqueue(ctx, ctx->mul_mat_pipeline, params, entries, wg_x); } // Returns true if node has enqueued work into the queue, false otherwise -static bool ggml_webgpu_encode_node(webgpu_context ctx, ggml_tensor* node) { +static bool ggml_webgpu_encode_node(webgpu_context ctx, ggml_tensor * node) { if (ggml_is_empty(node)) { return false; } WEBGPU_LOG_DEBUG("ggml_webgpu_encode_node(" << node << ", " << ggml_op_name(node->op) << ")"); - ggml_tensor* src0 = node->src[0]; - ggml_tensor* src1 = node->src[1]; + ggml_tensor * src0 = node->src[0]; + ggml_tensor * src1 = node->src[1]; switch (node->op) { - // no-ops - case GGML_OP_NONE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - return false; - case GGML_OP_CPY: { - ggml_webgpu_cpy(ctx, src0, node); - break; - } - case GGML_OP_MUL_MAT: { - ggml_webgpu_mul_mat(ctx, src0, src1, node); - break; - } - default: - return false; + // no-ops + case GGML_OP_NONE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + return false; + case GGML_OP_CPY: + { + ggml_webgpu_cpy(ctx, src0, node); + break; + } + case GGML_OP_MUL_MAT: + { + ggml_webgpu_mul_mat(ctx, src0, src1, node); + break; + } + default: + return false; } return true; } -static ggml_status ggml_backend_webgpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph* cgraph) { +static ggml_status ggml_backend_webgpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_graph_compute(" << cgraph->n_nodes << " nodes)"); - ggml_backend_webgpu_context* backend_ctx = static_cast(backend->context); - webgpu_context ctx = backend_ctx->webgpu_ctx; + ggml_backend_webgpu_context * backend_ctx = static_cast(backend->context); + webgpu_context ctx = backend_ctx->webgpu_ctx; for (int i = 0; i < cgraph->n_nodes; i++) { ggml_webgpu_encode_node(ctx, cgraph->nodes[i]); @@ -458,35 +517,45 @@ static ggml_backend_i ggml_backend_webgpu_i = { static void ggml_backend_webgpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_free_buffer()"); - ggml_backend_webgpu_buffer_context* ctx = static_cast(buffer->context); + ggml_backend_webgpu_buffer_context * ctx = static_cast(buffer->context); ctx->buffer.Destroy(); } // 
Returns the "fake" base pointer. -static void* ggml_backend_webgpu_buffer_get_base(ggml_backend_buffer_t buffer) { +static void * ggml_backend_webgpu_buffer_get_base(ggml_backend_buffer_t buffer) { GGML_UNUSED(buffer); return webgpu_ptr_base; } -static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor, uint8_t value, size_t offset, size_t size) { +static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + uint8_t value, + size_t offset, + size_t size) { if (size == 0) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor: size is zero, nothing to do."); return; } - WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " << offset << ", " << size << ")"); + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " + << offset << ", " << size << ")"); - ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context; - size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; + ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; + size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; // This is a trick to set all bytes of a u32 to the same 1 byte value. - uint32_t val32 = (uint32_t)value * 0x01010101; + uint32_t val32 = (uint32_t) value * 0x01010101; ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, val32, total_offset, size); } -static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor, const void* data, size_t offset, size_t size) { - WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")"); - ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context; - webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx; +static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_set_tensor(" << buffer << ", " << tensor << ", " << data << ", " + << offset << ", " << size << ")"); + ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; + webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx; size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; @@ -494,23 +563,29 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, if (size % 4 != 0) { // If size is not a multiple of 4, we need to memset the remaining bytes - size_t remaining_size = size % 4; + size_t remaining_size = size % 4; // pack the remaining bytes into a uint32_t - uint32_t val32 = 0; + uint32_t val32 = 0; for (size_t i = 0; i < remaining_size; i++) { - ((uint8_t*)&val32)[i] = ((const uint8_t*)data)[size - remaining_size + i]; + ((uint8_t *) &val32)[i] = ((const uint8_t *) data)[size - remaining_size + i]; } // memset the remaining bytes - ggml_backend_webgpu_buffer_memset(webgpu_ctx, buf_ctx->buffer, val32, total_offset + (size - remaining_size), remaining_size); + ggml_backend_webgpu_buffer_memset( + webgpu_ctx, buf_ctx->buffer, val32, total_offset + (size - remaining_size), remaining_size); } } -static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t 
buffer, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { - WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " << offset << ", " << size << ")"); +static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_get_tensor(" << buffer << ", " << tensor << ", " << data << ", " + << offset << ", " << size << ")"); - ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context; - webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx; - wgpu::Device device = webgpu_ctx->device; + ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; + webgpu_context webgpu_ctx = buf_ctx->webgpu_ctx; + wgpu::Device device = webgpu_ctx->device; size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; @@ -522,14 +597,16 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, std::lock_guard lock(webgpu_ctx->get_tensor_mutex); - if (webgpu_ctx->get_tensor_staging_buf == nullptr || - webgpu_ctx->get_tensor_staging_buf.GetSize() < final_size) { + if (webgpu_ctx->get_tensor_staging_buf == nullptr || webgpu_ctx->get_tensor_staging_buf.GetSize() < final_size) { // Create a new staging buffer if it doesn't exist or is too small if (webgpu_ctx->get_tensor_staging_buf) { webgpu_ctx->get_tensor_staging_buf.Destroy(); } - ggml_webgpu_create_buffer(device, webgpu_ctx->get_tensor_staging_buf, final_size, - wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead, "get_tensor_staging_buf"); + ggml_webgpu_create_buffer(device, + webgpu_ctx->get_tensor_staging_buf, + final_size, + wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead, + "get_tensor_staging_buf"); } // Copy the data from the buffer to the staging buffer @@ -542,7 +619,7 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, // Map the staging buffer to read the data ggml_backend_webgpu_map_buffer(webgpu_ctx, webgpu_ctx->get_tensor_staging_buf, wgpu::MapMode::Read, 0, final_size); // Must specify size here since the staging buffer might be larger than the tensor size - const void* mapped_range = webgpu_ctx->get_tensor_staging_buf.GetConstMappedRange(0, final_size); + const void * mapped_range = webgpu_ctx->get_tensor_staging_buf.GetConstMappedRange(0, final_size); // Copy the data from the mapped range to the output buffer std::memcpy(data, mapped_range, size); @@ -550,54 +627,58 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, } static void ggml_backend_webgpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { - WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_clear(" << buffer << ", " << (uint32_t)value << ")"); + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_clear(" << buffer << ", " << (uint32_t) value << ")"); - ggml_backend_webgpu_buffer_context* buf_ctx = (ggml_backend_webgpu_buffer_context*)buffer->context; + ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, value, 0, buffer->size); } static ggml_backend_buffer_i ggml_backend_webgpu_buffer_interface = { /* .free_buffer = */ ggml_backend_webgpu_buffer_free_buffer, /* .get_base = */ ggml_backend_webgpu_buffer_get_base, - /* .init_tensor = */ NULL, // TODO: optional, needed? 
+ /* .init_tensor = */ NULL, // TODO: optional, needed? /* .memset_tensor = */ ggml_backend_webgpu_buffer_memset_tensor, /* .set_tensor = */ ggml_backend_webgpu_buffer_set_tensor, /* .get_tensor = */ ggml_backend_webgpu_buffer_get_tensor, - /* .cpy_tensor = */ NULL, // TODO: optional, implement this + /* .cpy_tensor = */ NULL, // TODO: optional, implement this /* .clear = */ ggml_backend_webgpu_buffer_clear, - /* .reset = */ NULL, // TODO: optional, think it coordinates with .init_tensor + /* .reset = */ NULL, // TODO: optional, think it coordinates with .init_tensor }; /* End GGML Backend Buffer Interface */ /* GGML Backend Buffer Type Interface */ -static const char* ggml_backend_webgpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - ggml_backend_webgpu_device_context* ctx = static_cast(buft->device->context); +static const char * ggml_backend_webgpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + ggml_backend_webgpu_device_context * ctx = static_cast(buft->device->context); return ctx->device_name.c_str(); } -static ggml_backend_buffer_t ggml_backend_webgpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { +static ggml_backend_buffer_t ggml_backend_webgpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, + size_t size) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_type_alloc_buffer(" << size << ")"); - ggml_backend_webgpu_device_context* ctx = static_cast(buft->device->context); + ggml_backend_webgpu_device_context * ctx = static_cast(buft->device->context); wgpu::Buffer buf; - ggml_webgpu_create_buffer(ctx->webgpu_ctx->device, buf, size, - wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst, "allocated_buffer"); + ggml_webgpu_create_buffer(ctx->webgpu_ctx->device, + buf, + size, + wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst, + "allocated_buffer"); - ggml_backend_webgpu_buffer_context* buf_ctx = new ggml_backend_webgpu_buffer_context(ctx->webgpu_ctx, buf); + ggml_backend_webgpu_buffer_context * buf_ctx = new ggml_backend_webgpu_buffer_context(ctx->webgpu_ctx, buf); return ggml_backend_buffer_init(buft, ggml_backend_webgpu_buffer_interface, buf_ctx, size); } static size_t ggml_backend_webgpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - ggml_backend_webgpu_device_context* ctx = static_cast(buft->device->context); + ggml_backend_webgpu_device_context * ctx = static_cast(buft->device->context); return ctx->webgpu_ctx->limits.minStorageBufferOffsetAlignment; } // maxBufferSize might be larger, but you can't bind more than maxStorageBufferBindingSize to a single binding. 
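
These buffer-type callbacks surface the two limits that drive the bit arithmetic recurring through this file: binding offsets are aligned down to minStorageBufferOffsetAlignment (with the remainder forwarded to the shader as an element offset), binding sizes are rounded up to the 4-byte multiple that storage bindings require, and element counts are ceil-divided into workgroups. A compilable sketch of those three helpers, assuming power-of-two alignments exactly as the patch itself does:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Align an offset DOWN to a power-of-two boundary; the remainder is what
    // ggml_webgpu_cpy passes to the shader as a per-tensor element offset.
    static size_t align_down(size_t offset, size_t align) {
        assert((align & (align - 1)) == 0);  // power of two, as the code assumes
        return offset & ~(align - 1);
    }

    // Round a size UP to a power-of-two multiple, as done with
    // WEBGPU_STORAGE_BUF_BINDING_MULT for storage-binding sizes.
    static size_t round_up(size_t size, size_t mult) {
        return (size + mult - 1) & ~(mult - 1);
    }

    // Ceil-divide n work items into workgroups: the (n + wg - 1) / wg pattern
    // behind every wg_x computation in this backend.
    static uint32_t ceil_div(uint32_t n, uint32_t wg) {
        return (n + wg - 1) / wg;
    }

    int main() {
        size_t base         = align_down(1234, 256);  // 1024
        size_t misalignment = 1234 - base;            // 210, becomes a shader param
        (void) misalignment;
        return (round_up(10, 4) == 12 && ceil_div(100, 64) == 2) ? 0 : 1;
    }
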
static size_t ggml_backend_webgpu_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { - ggml_backend_webgpu_device_context* ctx = static_cast(buft->device->context); + ggml_backend_webgpu_device_context * ctx = static_cast(buft->device->context); return ctx->webgpu_ctx->limits.maxStorageBufferBindingSize; } @@ -605,21 +686,21 @@ static size_t ggml_backend_webgpu_buffer_type_get_max_size(ggml_backend_buffer_t /* GGML Backend Device Interface */ -static const char* ggml_backend_webgpu_device_get_name(ggml_backend_dev_t dev) { - ggml_backend_webgpu_device_context* ctx = static_cast(dev->context); +static const char * ggml_backend_webgpu_device_get_name(ggml_backend_dev_t dev) { + ggml_backend_webgpu_device_context * ctx = static_cast(dev->context); return ctx->device_name.c_str(); } -static const char* ggml_backend_webgpu_device_get_description(ggml_backend_dev_t dev) { - ggml_backend_webgpu_device_context* ctx = static_cast(dev->context); +static const char * ggml_backend_webgpu_device_get_description(ggml_backend_dev_t dev) { + ggml_backend_webgpu_device_context * ctx = static_cast(dev->context); return ctx->device_desc.c_str(); } -static void ggml_backend_webgpu_device_get_memory(ggml_backend_dev_t dev, size_t* free, size_t* total) { - ggml_backend_webgpu_device_context* ctx = static_cast(dev->context); +static void ggml_backend_webgpu_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { + ggml_backend_webgpu_device_context * ctx = static_cast(dev->context); // TODO: what do we actually want to return here? maxBufferSize might not be the full available memory. - *free = ctx->webgpu_ctx->limits.maxBufferSize; - *total = ctx->webgpu_ctx->limits.maxBufferSize; + *free = ctx->webgpu_ctx->limits.maxBufferSize; + *total = ctx->webgpu_ctx->limits.maxBufferSize; } static enum ggml_backend_dev_type ggml_backend_webgpu_device_get_type(ggml_backend_dev_t dev) { @@ -627,10 +708,10 @@ static enum ggml_backend_dev_type ggml_backend_webgpu_device_get_type(ggml_backe return GGML_BACKEND_DEVICE_TYPE_GPU; } -static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props* props) { - props->name = ggml_backend_webgpu_device_get_name(dev); +static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + props->name = ggml_backend_webgpu_device_get_name(dev); props->description = ggml_backend_webgpu_device_get_description(dev); - props->type = ggml_backend_webgpu_device_get_type(dev); + props->type = ggml_backend_webgpu_device_get_type(dev); ggml_backend_webgpu_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = { /* .async = */ false, @@ -641,71 +722,77 @@ static void ggml_backend_webgpu_device_get_props(ggml_backend_dev_t dev, struct } static ggml_guid_t ggml_backend_webgpu_guid(void) { - static const char* guid_str = "__ggml_webgpu :)"; - return reinterpret_cast((void*)guid_str); + static const char * guid_str = "__ggml_webgpu :)"; + return reinterpret_cast((void *) guid_str); } -static void ggml_webgpu_init_memset_pipeline(webgpu_context& webgpu_ctx) { +static void ggml_webgpu_init_memset_pipeline(webgpu_context & webgpu_ctx) { // we use the maximum workgroup size for the memset pipeline size_t max_wg_size = webgpu_ctx->limits.maxComputeWorkgroupSizeX; size_t max_threads = max_wg_size * webgpu_ctx->limits.maxComputeWorkgroupsPerDimension; // Size the bytes_per_thread so that the largest buffer size can be handled - webgpu_ctx->memset_bytes_per_thread = 
(webgpu_ctx->limits.maxStorageBufferBindingSize + max_threads - 1) / max_threads; + webgpu_ctx->memset_bytes_per_thread = + (webgpu_ctx->limits.maxStorageBufferBindingSize + max_threads - 1) / max_threads; std::vector constants(2); - constants[0].key = "wg_size"; + constants[0].key = "wg_size"; constants[0].value = max_wg_size; - constants[1].key = "bytes_per_thread"; + constants[1].key = "bytes_per_thread"; constants[1].value = webgpu_ctx->memset_bytes_per_thread; ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->memset_pipeline, wgsl_memset, "memset", constants); } -static void ggml_webgpu_init_mul_mat_pipeline(webgpu_context& webgpu_ctx) { +static void ggml_webgpu_init_mul_mat_pipeline(webgpu_context & webgpu_ctx) { ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->mul_mat_pipeline, wgsl_mul_mat, "mul_mat"); } -static void ggml_webgpu_init_cpy_pipeline(webgpu_context& webgpu_ctx) { +static void ggml_webgpu_init_cpy_pipeline(webgpu_context & webgpu_ctx) { std::vector constants(1); - constants[0].key = "wg_size"; + constants[0].key = "wg_size"; constants[0].value = webgpu_ctx->limits.maxComputeWorkgroupSizeX; ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->cpy_pipeline, wgsl_cpy, "cpy", constants); } -static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, const char* params) { +static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, const char * params) { GGML_UNUSED(params); WEBGPU_LOG_DEBUG("ggml_backend_webgpu_device_init()"); - ggml_backend_webgpu_device_context* dev_ctx = static_cast(dev->context); - webgpu_context webgpu_ctx = dev_ctx->webgpu_ctx; + ggml_backend_webgpu_device_context * dev_ctx = static_cast(dev->context); + webgpu_context webgpu_ctx = dev_ctx->webgpu_ctx; // Multiple threads may try to initialize the device std::lock_guard lock(webgpu_ctx->init_mutex); if (!webgpu_ctx->device_init) { // Initialize device wgpu::DeviceDescriptor dev_desc; - dev_desc.requiredLimits = &webgpu_ctx->limits; - dev_desc.requiredFeatures = webgpu_ctx->features.features; + dev_desc.requiredLimits = &webgpu_ctx->limits; + dev_desc.requiredFeatures = webgpu_ctx->features.features; dev_desc.requiredFeatureCount = webgpu_ctx->features.featureCount; - dev_desc.SetDeviceLostCallback(wgpu::CallbackMode::AllowSpontaneous, - [](const wgpu::Device& device, wgpu::DeviceLostReason reason, wgpu::StringView message) { + dev_desc.SetDeviceLostCallback( + wgpu::CallbackMode::AllowSpontaneous, + [](const wgpu::Device & device, wgpu::DeviceLostReason reason, wgpu::StringView message) { GGML_UNUSED(device); - GGML_LOG_ERROR("ggml_webgpu: Device lost! Reason: %d, Message: %s\n", static_cast(reason), message.data); + GGML_LOG_ERROR( + "ggml_webgpu: Device lost! Reason: %d, Message: %s\n", static_cast(reason), message.data); }); dev_desc.SetUncapturedErrorCallback( - [](const wgpu::Device& device, wgpu::ErrorType reason, wgpu::StringView message) { + [](const wgpu::Device & device, wgpu::ErrorType reason, wgpu::StringView message) { GGML_UNUSED(device); - GGML_LOG_ERROR("ggml_webgpu: Device error! Reason: %d, Message: %s\n", static_cast(reason), message.data); + GGML_LOG_ERROR( + "ggml_webgpu: Device error! 
Reason: %d, Message: %s\n", static_cast(reason), message.data); }); - webgpu_ctx->instance.WaitAny(webgpu_ctx->adapter.RequestDevice(&dev_desc, wgpu::CallbackMode::AllowSpontaneous, - [webgpu_ctx](wgpu::RequestDeviceStatus status, wgpu::Device device, wgpu::StringView message) { - if (status != wgpu::RequestDeviceStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to get a device: %s\n", message.data); - return; - } - webgpu_ctx->device = std::move(device); - }), - UINT64_MAX - ); + webgpu_ctx->instance.WaitAny( + webgpu_ctx->adapter.RequestDevice( + &dev_desc, + wgpu::CallbackMode::AllowSpontaneous, + [webgpu_ctx](wgpu::RequestDeviceStatus status, wgpu::Device device, wgpu::StringView message) { + if (status != wgpu::RequestDeviceStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to get a device: %s\n", message.data); + return; + } + webgpu_ctx->device = std::move(device); + }), + UINT64_MAX); GGML_ASSERT(webgpu_ctx->device != nullptr); // Initialize (compute) queue @@ -721,7 +808,7 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co } static ggml_backend_webgpu_context backend_ctx; - backend_ctx.name = GGML_WEBGPU_NAME + std::string(": ") + dev_ctx->device_name; + backend_ctx.name = GGML_WEBGPU_NAME + std::string(": ") + dev_ctx->device_name; backend_ctx.webgpu_ctx = webgpu_ctx; // See GGML Backend Interface section @@ -739,14 +826,15 @@ static ggml_backend_buffer_type_t ggml_backend_webgpu_device_get_buffer_type(ggm // See GGML Backend Buffer Type Interface section static struct ggml_backend_buffer_type ggml_backend_webgpu_buffer_type = { /* .iface = */ { - /* .get_name = */ ggml_backend_webgpu_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_webgpu_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_webgpu_buffer_type_get_alignment, - /* .get_max_size = */ ggml_backend_webgpu_buffer_type_get_max_size, - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes - /* .is_host = */ NULL, // defaults to false + /* .get_name = */ ggml_backend_webgpu_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_webgpu_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_webgpu_buffer_type_get_alignment, + /* .get_max_size = */ ggml_backend_webgpu_buffer_type_get_max_size, + /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes + /* .is_host = */ NULL, // defaults to false }, - /* .device = */ dev, + /* .device = */ + dev, /* .context = */ NULL, }; @@ -755,23 +843,23 @@ static ggml_backend_buffer_type_t ggml_backend_webgpu_device_get_buffer_type(ggm static bool ggml_backend_webgpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { GGML_UNUSED(dev); - return buft->iface.get_name == ggml_backend_webgpu_buffer_type_get_name; + return buft->iface.get_name == ggml_backend_webgpu_buffer_type_get_name; } -static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor* op) { +static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { GGML_UNUSED(dev); switch (op->op) { - case GGML_OP_NONE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - return true; - case GGML_OP_CPY: - return op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32; - case GGML_OP_MUL_MAT: - return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; - default: - return false; + case GGML_OP_NONE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + return true; + case GGML_OP_CPY: + return op->type == GGML_TYPE_F16 && op->src[0]->type == 
GGML_TYPE_F32; + case GGML_OP_MUL_MAT: + return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; + default: + return false; } } @@ -797,13 +885,13 @@ static struct ggml_backend_device_i ggml_backend_webgpu_device_i = { /* GGML Backend Registration Interface */ -static const char* ggml_backend_webgpu_reg_get_name(ggml_backend_reg_t reg) { - ggml_backend_webgpu_reg_context* ctx = static_cast(reg->context); +static const char * ggml_backend_webgpu_reg_get_name(ggml_backend_reg_t reg) { + ggml_backend_webgpu_reg_context * ctx = static_cast(reg->context); return ctx->name; } static size_t ggml_backend_webgpu_reg_get_device_count(ggml_backend_reg_t reg) { - ggml_backend_webgpu_reg_context* ctx = static_cast(reg->context); + ggml_backend_webgpu_reg_context * ctx = static_cast(reg->context); return ctx->device_count; } @@ -813,20 +901,22 @@ static ggml_backend_dev_t ggml_backend_webgpu_reg_get_device(ggml_backend_reg_t GGML_ASSERT(index == 0); WEBGPU_LOG_DEBUG("ggml_backend_reg_get_device()"); - ggml_backend_webgpu_reg_context* reg_ctx = static_cast(reg->context); + ggml_backend_webgpu_reg_context * reg_ctx = static_cast(reg->context); webgpu_context ctx = reg_ctx->webgpu_ctx; wgpu::RequestAdapterOptions options = {}; - auto callback = [](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, const char* message, void* userdata) { - if (status != wgpu::RequestAdapterStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to get an adapter: %s\n", message); - return; - } - *static_cast(userdata) = std::move(adapter); + auto callback = + [](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, const char * message, void * userdata) { + if (status != wgpu::RequestAdapterStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to get an adapter: %s\n", message); + return; + } + *static_cast(userdata) = std::move(adapter); }; - void* userdata = &ctx->adapter; - ctx->instance.WaitAny(ctx->instance.RequestAdapter(&options, wgpu::CallbackMode::AllowSpontaneous, callback, userdata), UINT64_MAX); + void * userdata = &ctx->adapter; + ctx->instance.WaitAny( + ctx->instance.RequestAdapter(&options, wgpu::CallbackMode::AllowSpontaneous, callback, userdata), UINT64_MAX); GGML_ASSERT(ctx->adapter != nullptr); ctx->adapter.GetLimits(&ctx->limits); @@ -836,12 +926,19 @@ static ggml_backend_dev_t ggml_backend_webgpu_reg_get_device(ggml_backend_reg_t ctx->adapter.GetInfo(&info); static ggml_backend_webgpu_device_context device_ctx; - device_ctx.webgpu_ctx = ctx; + device_ctx.webgpu_ctx = ctx; device_ctx.device_name = GGML_WEBGPU_NAME; device_ctx.device_desc = std::string(info.description.data); - GGML_LOG_INFO("ggml_webgpu: adapter_info: vendor_id: %u | vendor: %s | architecture: %s | device_id: %u | name: %s | device_desc: %s\n", - info.vendorID, info.vendor.data, info.architecture.data, info.deviceID, info.device.data, info.description.data); + GGML_LOG_INFO( + "ggml_webgpu: adapter_info: vendor_id: %u | vendor: %s | architecture: %s | device_id: %u | name: %s | " + "device_desc: %s\n", + info.vendorID, + info.vendor.data, + info.architecture.data, + info.deviceID, + info.device.data, + info.description.data); // See GGML Backend Device Interface section static ggml_backend_device device = { @@ -852,7 +949,6 @@ static ggml_backend_dev_t ggml_backend_webgpu_reg_get_device(ggml_backend_reg_t return &device; } - static const struct ggml_backend_reg_i ggml_backend_webgpu_reg_i = { /* .get_name = */ ggml_backend_webgpu_reg_get_name, /* .get_device_count = */ 
ggml_backend_webgpu_reg_get_device_count, @@ -868,15 +964,15 @@ ggml_backend_reg_t ggml_backend_webgpu_reg() { webgpu_context webgpu_ctx = std::make_shared(); static ggml_backend_webgpu_reg_context ctx; - ctx.webgpu_ctx = webgpu_ctx; - ctx.name = GGML_WEBGPU_NAME; + ctx.webgpu_ctx = webgpu_ctx; + ctx.name = GGML_WEBGPU_NAME; ctx.device_count = 1; - wgpu::InstanceDescriptor instance_descriptor{}; + wgpu::InstanceDescriptor instance_descriptor{}; std::vector instance_features = { wgpu::InstanceFeatureName::TimedWaitAny }; - instance_descriptor.requiredFeatures = instance_features.data(); - instance_descriptor.requiredFeatureCount = instance_features.size(); - webgpu_ctx->instance = wgpu::CreateInstance(&instance_descriptor); + instance_descriptor.requiredFeatures = instance_features.data(); + instance_descriptor.requiredFeatureCount = instance_features.size(); + webgpu_ctx->instance = wgpu::CreateInstance(&instance_descriptor); GGML_ASSERT(webgpu_ctx->instance != nullptr); static ggml_backend_reg reg = { From b8012ecc0a6b1972bd6cf307c9ca30cbe9b68b5f Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Thu, 31 Jul 2025 11:02:08 -0700 Subject: [PATCH 05/24] Fix thread-safe implementation --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 89 ++++++++++++++++------------ 1 file changed, 50 insertions(+), 39 deletions(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 666bfbe183e41..61f0a19f70fa0 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -13,7 +13,9 @@ #include #include +#include #include +#include #include #ifdef GGML_WEBGPU_DEBUG @@ -61,7 +63,8 @@ struct webgpu_param_bufs { struct webgpu_param_buf_pool { std::vector free; - std::mutex mutex; + std::mutex mutex; + std::condition_variable cv; void init(wgpu::Device device) { @@ -108,19 +111,18 @@ struct webgpu_param_buf_pool { // All the base objects needed to run operations on a WebGPU device struct webgpu_context_struct { - wgpu::Instance instance; - wgpu::Adapter adapter; - wgpu::Device device; - wgpu::Queue queue; - wgpu::Limits limits; - wgpu::SupportedFeatures features; - - std::recursive_mutex submit_mutex; + wgpu::Instance instance; + wgpu::Adapter adapter; + wgpu::Device device; + wgpu::Queue queue; + wgpu::Limits limits; + + std::recursive_mutex mutex; std::mutex get_tensor_mutex; std::mutex init_mutex; - bool device_init = false; - // Parameter buffer pool + bool device_init = false; + webgpu_param_buf_pool param_buf_pool; wgpu::ComputePipeline memset_pipeline; @@ -134,36 +136,33 @@ struct webgpu_context_struct { // Command buffers which need to be submitted std::vector staged_command_bufs; + // Parameter buffers associated with the staged command buffers - std::vector staged_param_bufs; + std::vector staged_param_bufs; }; typedef std::shared_ptr webgpu_context; struct ggml_backend_webgpu_reg_context { webgpu_context webgpu_ctx; - - size_t device_count; - const char * name; + size_t device_count; + const char * name; }; struct ggml_backend_webgpu_device_context { webgpu_context webgpu_ctx; - - std::string device_name; - std::string device_desc; + std::string device_name; + std::string device_desc; }; struct ggml_backend_webgpu_context { webgpu_context webgpu_ctx; - - std::string name; + std::string name; }; struct ggml_backend_webgpu_buffer_context { webgpu_context webgpu_ctx; - - wgpu::Buffer buffer; + wgpu::Buffer buffer; ggml_backend_webgpu_buffer_context(webgpu_context ctx, wgpu::Buffer buf) : webgpu_ctx(std::move(ctx)), @@ -180,10 +179,13 @@ 
static void ggml_webgpu_create_pipeline(wgpu::Device & const char * label, const std::vector & constants = {}) { WEBGPU_LOG_DEBUG("ggml_webgpu_create_pipeline()"); + wgpu::ShaderSourceWGSL shader_source; shader_source.code = shader_code; + wgpu::ShaderModuleDescriptor shader_desc; - shader_desc.nextInChain = &shader_source; + shader_desc.nextInChain = &shader_source; + wgpu::ShaderModule shader_module = device.CreateShaderModule(&shader_desc); wgpu::ComputePipelineDescriptor pipeline_desc; @@ -210,8 +212,9 @@ static void ggml_webgpu_create_buffer(wgpu::Device & device, buffer_desc.usage = usage; buffer_desc.label = label; buffer_desc.mappedAtCreation = false; + // TODO: error handling - buffer = device.CreateBuffer(&buffer_desc); + buffer = device.CreateBuffer(&buffer_desc); } /** End WebGPU object initializations */ @@ -231,8 +234,7 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { } static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { - std::lock_guard lock(ctx->submit_mutex); - + std::lock_guard lock(ctx->mutex); ctx->queue.Submit(ctx->staged_command_bufs.size(), ctx->staged_command_bufs.data()); ctx->staged_command_bufs.clear(); std::vector staged_param_bufs = std::move(ctx->staged_param_bufs); @@ -274,6 +276,8 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & bool submit_imm = false) { webgpu_param_bufs params_bufs = ctx->param_buf_pool.alloc_bufs(); + std::lock_guard lock(ctx->mutex); + ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf, wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize()); uint32_t * _params = (uint32_t *) params_bufs.host_buf.GetMappedRange(); for (size_t i = 0; i < params.size(); i++) { @@ -315,7 +319,6 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & }); } else { // Enqueue commands and only submit if we have enough staged commands - std::lock_guard lock(ctx->submit_mutex); ctx->staged_command_bufs.push_back(commands); ctx->staged_param_bufs.push_back(params_bufs); if (ctx->staged_command_bufs.size() == WEBGPU_COMMAND_SUBMIT_BATCH_SIZE) { @@ -540,10 +543,12 @@ static void ggml_backend_webgpu_buffer_memset_tensor(ggml_backend_buffer_t buffe WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_memset_tensor(" << buffer << ", " << tensor << ", " << value << ", " << offset << ", " << size << ")"); - ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; - size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; + ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; + + size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; + // This is a trick to set all bytes of a u32 to the same 1 byte value. 
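
The byte-splat that follows merits a standalone illustration: multiplying an 8-bit value by 0x01010101 replicates it into all four byte lanes of a u32, which is what lets a 1-byte memset be issued as u32 stores from the shader. A self-contained check of that identity (standard library only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        uint8_t  value = 0xAB;
        // 0x01010101 has a 1 in each byte lane, so the product is 0xABABABAB.
        uint32_t val32 = (uint32_t) value * 0x01010101u;

        uint8_t bytes[4];
        std::memcpy(bytes, &val32, sizeof val32);
        for (int i = 0; i < 4; i++) {
            assert(bytes[i] == value);  // every byte of the u32 equals the input
        }
        return 0;
    }
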
- uint32_t val32 = (uint32_t) value * 0x01010101; + uint32_t val32 = (uint32_t) value * 0x01010101; ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, val32, total_offset, size); } @@ -559,13 +564,16 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; + std::lock_guard lock(webgpu_ctx->mutex); webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size / 4) * 4); if (size % 4 != 0) { // If size is not a multiple of 4, we need to memset the remaining bytes - size_t remaining_size = size % 4; + size_t remaining_size = size % 4; + // pack the remaining bytes into a uint32_t - uint32_t val32 = 0; + uint32_t val32 = 0; + for (size_t i = 0; i < remaining_size; i++) { ((uint8_t *) &val32)[i] = ((const uint8_t *) data)[size - remaining_size + i]; } @@ -613,8 +621,12 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); encoder.CopyBufferToBuffer(buf_ctx->buffer, total_offset, webgpu_ctx->get_tensor_staging_buf, 0, final_size); wgpu::CommandBuffer commands = encoder.Finish(); - // Submit the command buffer to the queue - webgpu_ctx->queue.Submit(1, &commands); + + { + std::lock_guard submit_lock(webgpu_ctx->mutex); + // Submit the command buffer to the queue + webgpu_ctx->queue.Submit(1, &commands); + } // Map the staging buffer to read the data ggml_backend_webgpu_map_buffer(webgpu_ctx, webgpu_ctx->get_tensor_staging_buf, wgpu::MapMode::Read, 0, final_size); @@ -628,7 +640,6 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, static void ggml_backend_webgpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_buffer_clear(" << buffer << ", " << (uint32_t) value << ")"); - ggml_backend_webgpu_buffer_context * buf_ctx = (ggml_backend_webgpu_buffer_context *) buffer->context; ggml_backend_webgpu_buffer_memset(buf_ctx->webgpu_ctx, buf_ctx->buffer, value, 0, buffer->size); } @@ -764,10 +775,11 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co std::lock_guard lock(webgpu_ctx->init_mutex); if (!webgpu_ctx->device_init) { // Initialize device - wgpu::DeviceDescriptor dev_desc; + std::vector required_features = { wgpu::FeatureName::ShaderF16 }; + wgpu::DeviceDescriptor dev_desc; dev_desc.requiredLimits = &webgpu_ctx->limits; - dev_desc.requiredFeatures = webgpu_ctx->features.features; - dev_desc.requiredFeatureCount = webgpu_ctx->features.featureCount; + dev_desc.requiredFeatures = required_features.data(); + dev_desc.requiredFeatureCount = required_features.size(); dev_desc.SetDeviceLostCallback( wgpu::CallbackMode::AllowSpontaneous, [](const wgpu::Device & device, wgpu::DeviceLostReason reason, wgpu::StringView message) { @@ -920,7 +932,6 @@ static ggml_backend_dev_t ggml_backend_webgpu_reg_get_device(ggml_backend_reg_t GGML_ASSERT(ctx->adapter != nullptr); ctx->adapter.GetLimits(&ctx->limits); - ctx->adapter.GetFeatures(&ctx->features); wgpu::AdapterInfo info{}; ctx->adapter.GetInfo(&info); From cddda7e73034d1e8594c4aef5ee56807b058b5d0 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Thu, 31 Jul 2025 12:28:29 -0700 Subject: [PATCH 06/24] Use device implicit synchronization --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp 
b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 61f0a19f70fa0..91411d9c0014b 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -276,8 +276,6 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & bool submit_imm = false) { webgpu_param_bufs params_bufs = ctx->param_buf_pool.alloc_bufs(); - std::lock_guard lock(ctx->mutex); - ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf, wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize()); uint32_t * _params = (uint32_t *) params_bufs.host_buf.GetMappedRange(); for (size_t i = 0; i < params.size(); i++) { @@ -318,6 +316,8 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & ctx->param_buf_pool.free_bufs({ params_bufs }); }); } else { + // Lock the context mutex when pushing to the staging vectors. + std::lock_guard lock(ctx->mutex); // Enqueue commands and only submit if we have enough staged commands ctx->staged_command_bufs.push_back(commands); ctx->staged_param_bufs.push_back(params_bufs); @@ -564,7 +564,6 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; - std::lock_guard lock(webgpu_ctx->mutex); webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size / 4) * 4); if (size % 4 != 0) { @@ -622,11 +621,8 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, encoder.CopyBufferToBuffer(buf_ctx->buffer, total_offset, webgpu_ctx->get_tensor_staging_buf, 0, final_size); wgpu::CommandBuffer commands = encoder.Finish(); - { - std::lock_guard submit_lock(webgpu_ctx->mutex); - // Submit the command buffer to the queue - webgpu_ctx->queue.Submit(1, &commands); - } + // Submit the command buffer to the queue + webgpu_ctx->queue.Submit(1, &commands); // Map the staging buffer to read the data ggml_backend_webgpu_map_buffer(webgpu_ctx, webgpu_ctx->get_tensor_staging_buf, wgpu::MapMode::Read, 0, final_size); @@ -775,7 +771,7 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co std::lock_guard lock(webgpu_ctx->init_mutex); if (!webgpu_ctx->device_init) { // Initialize device - std::vector required_features = { wgpu::FeatureName::ShaderF16 }; + std::vector required_features = { wgpu::FeatureName::ShaderF16, wgpu::FeatureName::ImplicitDeviceSynchronization }; wgpu::DeviceDescriptor dev_desc; dev_desc.requiredLimits = &webgpu_ctx->limits; dev_desc.requiredFeatures = required_features.data(); From 6a20e396dc54e7f89d6660a7f7525433634dc0e0 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Fri, 1 Aug 2025 10:08:11 -0700 Subject: [PATCH 07/24] Update workflow to use custom release --- .github/workflows/build.yml | 65 ++++++++++--------------------------- 1 file changed, 17 insertions(+), 48 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c6d51fb0c2e7e..0dfe75c637a8a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,6 +5,7 @@ on: push: branches: - master + - workflow-updates paths: [ '.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', @@ -159,31 +160,15 @@ jobs: - name: Dawn Dependency id: dawn-depends run: | - ARTIFACTS_JSON=$(curl -s -L \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "https://api.github.com/repos/google/dawn/actions/artifacts") - echo "Finding latest macos-latest-Release artifact..." 
- DOWNLOAD_URL=$(echo "$ARTIFACTS_JSON" | jq -r '.artifacts - | sort_by(.created_at) - | reverse - | map(select(.name | test("macos-latest-Release$"))) - | .[0].archive_download_url') - if [ "$DOWNLOAD_URL" = "null" ] || [ -z "$DOWNLOAD_URL" ]; then - echo "No suitable Dawn artifact found!" - exit 1 - fi - echo "Downloading from: $DOWNLOAD_URL" - curl -L \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - -o artifact.zip "$DOWNLOAD_URL" - unzip artifact.zip + DAWN_VERSION="v1.0.0" + DAWN_OWNER="reeselevine" + DAWN_REPO="dawn" + DAWN_ASSET_NAME="Dawn-a1a6b45cced25a3b7f4fb491e0ae70796cc7f22b-macos-latest-Release.tar.gz" + echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}" + curl -L -o artifact.tar.gz \ + "https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}" mkdir dawn - tar_file=$(find . -name '*.tar.gz' | head -n 1) - echo "Extracting: $tar_file" - tar -xvf "$tar_file" -C dawn --strip-components=1 + tar -xvf artifact.tar.gz -C dawn --strip-components=1 - name: Build id: cmake_build @@ -433,31 +418,15 @@ jobs: id: dawn-depends run: | sudo apt-get install -y libxrandr-dev libxinerama-dev libxcursor-dev mesa-common-dev libx11-xcb-dev libxi-dev - ARTIFACTS_JSON=$(curl -s -L \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "https://api.github.com/repos/google/dawn/actions/artifacts") - echo "Finding latest ubuntu-latest-Release artifact..." - DOWNLOAD_URL=$(echo "$ARTIFACTS_JSON" | jq -r '.artifacts - | sort_by(.created_at) - | reverse - | map(select(.name | test("ubuntu-latest-Release$"))) - | .[0].archive_download_url') - if [ "$DOWNLOAD_URL" = "null" ] || [ -z "$DOWNLOAD_URL" ]; then - echo "No suitable Dawn artifact found!" - exit 1 - fi - echo "Downloading from: $DOWNLOAD_URL" - curl -L \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - -o artifact.zip "$DOWNLOAD_URL" - unzip artifact.zip + DAWN_VERSION="v1.0.0" + DAWN_OWNER="reeselevine" + DAWN_REPO="dawn" + DAWN_ASSET_NAME="Dawn-a1a6b45cced25a3b7f4fb491e0ae70796cc7f22b-ubuntu-latest-Release.tar.gz" + echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}" + curl -L -o artifact.tar.gz \ + "https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}" mkdir dawn - tar_file=$(find . 
-name '*.tar.gz' | head -n 1) - echo "Extracting: $tar_file" - tar -xvf "$tar_file" -C dawn --strip-components=1 + tar -xvf artifact.tar.gz -C dawn --strip-components=1 - name: Build id: cmake_build From ea39068e393662c5002c7c6fab889b478028b3c3 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Fri, 1 Aug 2025 11:00:07 -0700 Subject: [PATCH 08/24] Remove testing branch workflow --- .github/workflows/build.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0dfe75c637a8a..3d4f837e24895 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,7 +5,6 @@ on: push: branches: - master - - workflow-updates paths: [ '.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', From ae8edbfd11656cc2cac934ec84aea4d7fa5ea133 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Mon, 4 Aug 2025 11:18:23 -0700 Subject: [PATCH 09/24] Disable set_rows until it's implemented --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3d4f837e24895..63e40c3586285 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -179,6 +179,7 @@ jobs: - name: Test id: cmake_test run: | + export LLAMA_SET_ROWS=0 cd build ctest -L main --verbose --timeout 900 @@ -437,6 +438,7 @@ jobs: - name: Test id: cmake_test run: | + export LLAMA_SET_ROWS=0 cd build # This is using llvmpipe and runs slower than other backends ctest -L main --verbose --timeout 3600 From bfc6930472975b966011d638954129fbd5d465d1 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Mon, 4 Aug 2025 19:32:47 -0700 Subject: [PATCH 10/24] Fix potential issue around empty queue submission --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 91411d9c0014b..ecaeceb96b44f 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -235,6 +235,10 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { std::lock_guard lock(ctx->mutex); + if (ctx->staged_command_bufs.empty()) { + // Nothing to submit + return; + } ctx->queue.Submit(ctx->staged_command_bufs.size(), ctx->staged_command_bufs.data()); ctx->staged_command_bufs.clear(); std::vector staged_param_bufs = std::move(ctx->staged_param_bufs); From 69965a819909714a439bbf97c54fbd6a4f21a7bd Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Mon, 4 Aug 2025 20:35:05 -0700 Subject: [PATCH 11/24] Try synchronous submission --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index ecaeceb96b44f..dd3b9572a3e04 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -26,7 +26,7 @@ /* Constants */ -#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 16 +#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 1 #define WEBGPU_MUL_MAT_WG_SIZE 64 #define WEBGPU_NUM_PARAM_BUFS 100 #define WEBGPU_PARAMS_BUF_SIZE_BYTES 256 @@ -329,6 +329,7 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & ggml_backend_webgpu_submit_queue(ctx); } } + ggml_backend_webgpu_wait_on_submission(ctx); } static void ggml_backend_webgpu_buffer_memset(webgpu_context & ctx, From c773e2ffbe35116cd11b318ff0734728942018ea Mon Sep 17 00:00:00 2001 
From: Reese Levine Date: Mon, 4 Aug 2025 20:52:22 -0700 Subject: [PATCH 12/24] Try waiting on all futures explicitly --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index dd3b9572a3e04..22f4bc57e2ba2 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -139,6 +139,8 @@ struct webgpu_context_struct { // Parameter buffers associated with the staged command buffers std::vector staged_param_bufs; + + std::vector callback_futures; }; typedef std::shared_ptr webgpu_context; @@ -221,16 +223,14 @@ static void ggml_webgpu_create_buffer(wgpu::Device & device, /** WebGPU Actions */ +// Wait for the queue to finish processing all submitted work static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { - // Wait for the queue to finish processing all commands - ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone( - wgpu::CallbackMode::AllowSpontaneous, - [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { - if (status != wgpu::QueueWorkDoneStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to wait on queue: %s\n", message.data); - } - }), - UINT64_MAX); + std::lock_guard lock(ctx->mutex); + if (ctx->callback_futures.empty()) { + return; + } + ctx->instance.WaitAny(ctx->callback_futures.size(), ctx->callback_futures.data(), UINT64_MAX); + ctx->callback_futures.clear(); } static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { @@ -243,7 +243,7 @@ static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { ctx->staged_command_bufs.clear(); std::vector staged_param_bufs = std::move(ctx->staged_param_bufs); // Free the staged parameter buffers once the submission completes - ctx->queue.OnSubmittedWorkDone( + wgpu::Future f = ctx->queue.OnSubmittedWorkDone( wgpu::CallbackMode::AllowSpontaneous, [ctx, staged_param_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { if (status != wgpu::QueueWorkDoneStatus::Success) { @@ -252,6 +252,7 @@ static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { // Free the staged parameter buffers ctx->param_buf_pool.free_bufs(staged_param_bufs); }); + ctx->callback_futures.push_back({ f }); } static void ggml_backend_webgpu_map_buffer(webgpu_context & ctx, @@ -311,7 +312,7 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & if (submit_imm) { // Submit immediately ctx->queue.Submit(1, &commands); - ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous, + wgpu::Future f = ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous, [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { if (status != wgpu::QueueWorkDoneStatus::Success) { GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", @@ -319,6 +320,8 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & } ctx->param_buf_pool.free_bufs({ params_bufs }); }); + std::lock_guard lock(ctx->mutex); + ctx->callback_futures.push_back({ f }); } else { // Lock the context mutex when pushing to the staging vectors. 
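// Without this lock, a concurrent ggml_backend_webgpu_submit_queue() could
// read and clear the staging vectors while they are being pushed to, and a
// command buffer could end up submitted in a different batch than the
// parameter buffers it was recorded with.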
std::lock_guard lock(ctx->mutex); From 5aeab733660660a6b217eb268a07bcc51adef4f4 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Mon, 4 Aug 2025 21:03:40 -0700 Subject: [PATCH 13/24] Add debug --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 63e40c3586285..e3e1f3a5381b3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -432,7 +432,7 @@ jobs: id: cmake_build run: | export Dawn_DIR=dawn/lib64/cmake/Dawn - cmake -B build -DGGML_WEBGPU=ON + cmake -B build -DGGML_WEBGPU=ON -DGGML_WEBGPU_DEBUG=ON cmake --build build --config Release -j $(nproc) - name: Test From d4af0d6d0737d51923420a6d58e74c76db438171 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Mon, 4 Aug 2025 21:18:11 -0700 Subject: [PATCH 14/24] Add more debug messages --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 22f4bc57e2ba2..5c53d21ab6c05 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -225,6 +225,7 @@ static void ggml_webgpu_create_buffer(wgpu::Device & device, // Wait for the queue to finish processing all submitted work static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_wait_on_submission()"); std::lock_guard lock(ctx->mutex); if (ctx->callback_futures.empty()) { return; @@ -235,6 +236,7 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { std::lock_guard lock(ctx->mutex); + WEBGPU_LOG_DEBUG("ggml_backend_webgpu_submit_queue()"); if (ctx->staged_command_bufs.empty()) { // Nothing to submit return; @@ -376,6 +378,8 @@ static void ggml_backend_webgpu_free(ggml_backend_t backend) { } static void ggml_webgpu_cpy(webgpu_context & ctx, ggml_tensor * src, ggml_tensor * dst) { + WEBGPU_LOG_DEBUG("ggml_webgpu_cpy(" << src << ", " << dst << ")"); + size_t src_offset = ggml_backend_webgpu_tensor_offset(src); // assumes power of 2 offset alignment size_t src_misalignment = src_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1); From 320f679666171743a849e8dd08ec8cb58cda5a85 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 07:27:55 -0700 Subject: [PATCH 15/24] Work on getting ssh access for debugging --- .github/workflows/build.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e3e1f3a5381b3..1d4322e743ae2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,6 +5,7 @@ on: push: branches: - master + - webgpu-ci-debug paths: [ '.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', @@ -443,6 +444,9 @@ jobs: # This is using llvmpipe and runs slower than other backends ctest -L main --verbose --timeout 3600 + - name: Debugging with tmate + uses: mxschmitt/action-tmate@v3.22 + ubuntu-22-cmake-hip: runs-on: ubuntu-22.04 container: rocm/dev-ubuntu-22.04:6.0.2 From f42291198454a3c2bee4a193c1dc6b78793403dc Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 07:33:56 -0700 Subject: [PATCH 16/24] Debug on failure --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1d4322e743ae2..2afc4fbf8d207 100644 --- a/.github/workflows/build.yml +++ 
b/.github/workflows/build.yml @@ -445,6 +445,7 @@ jobs: ctest -L main --verbose --timeout 3600 - name: Debugging with tmate + if: ${{ failure() }} uses: mxschmitt/action-tmate@v3.22 ubuntu-22-cmake-hip: From 0feece50f0f9e12c08890ebf56b67c4e1a30acc6 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 10:25:49 -0700 Subject: [PATCH 17/24] Disable other tests --- .github/workflows/build.yml | 51 +++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2afc4fbf8d207..b6ffdd2bd01bb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -57,6 +57,7 @@ env: jobs: macOS-latest-cmake-arm64: + if: false runs-on: macos-14 steps: @@ -97,6 +98,8 @@ jobs: ctest -L 'main|curl' --verbose --timeout 900 macOS-latest-cmake-x64: + if: false + runs-on: macos-13 steps: @@ -185,6 +188,8 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-cpu-cmake: + if: false + strategy: matrix: include: @@ -238,6 +243,8 @@ jobs: ./bin/llama-cli -m stories260K.gguf -p "One day, Lily met a Shoggoth" -n 500 -c 256 ubuntu-latest-cmake-sanitizer: + if: false + runs-on: ubuntu-latest continue-on-error: true @@ -292,6 +299,8 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-latest-llguidance: + if: false + runs-on: ubuntu-latest steps: @@ -322,6 +331,8 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-latest-cmake-rpc: + if: false + runs-on: ubuntu-latest continue-on-error: true @@ -357,6 +368,8 @@ jobs: ctest -L main --verbose ubuntu-22-cmake-vulkan: + if: false + runs-on: ubuntu-22.04 steps: @@ -449,6 +462,8 @@ jobs: uses: mxschmitt/action-tmate@v3.22 ubuntu-22-cmake-hip: + if: false + runs-on: ubuntu-22.04 container: rocm/dev-ubuntu-22.04:6.0.2 @@ -489,6 +504,8 @@ jobs: cmake --build build2 --config Release -j $(nproc) ubuntu-22-cmake-musa: + if: false + runs-on: ubuntu-22.04 container: mthreads/musa:rc4.2.0-devel-ubuntu22.04-amd64 @@ -517,6 +534,8 @@ jobs: cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl: + if: false + runs-on: ubuntu-22.04 continue-on-error: true @@ -565,6 +584,8 @@ jobs: cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl-fp16: + if: false + runs-on: ubuntu-22.04 continue-on-error: true @@ -614,12 +635,18 @@ jobs: cmake --build build --config Release -j $(nproc) build-linux-cross: + if: false + uses: ./.github/workflows/build-linux-cross.yml build-cmake-pkg: + if: false + uses: ./.github/workflows/build-cmake-pkg.yml macOS-latest-cmake-ios: + if: false + runs-on: macos-latest steps: @@ -657,6 +684,8 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-cmake-tvos: + if: false + runs-on: macos-latest steps: @@ -694,6 +723,8 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-cmake-visionos: + if: false + runs-on: macos-latest steps: @@ -725,6 +756,8 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-swift: + if: false + runs-on: macos-latest strategy: @@ -769,6 +802,8 @@ jobs: ./build-xcframework.sh windows-msys2: + if: false + runs-on: windows-2025 strategy: @@ -819,6 +854,8 @@ jobs: cmake --build build --config ${{ matrix.build }} -j $(nproc) windows-latest-cmake: + if: false + runs-on: windows-2025 env: @@ -949,6 +986,8 @@ jobs: # & $sde -future -- ctest -L main -C Release --verbose --timeout 900 ubuntu-latest-cmake-cuda: + if: false + runs-on: 
ubuntu-latest container: nvidia/cuda:12.6.2-devel-ubuntu24.04 @@ -982,6 +1021,8 @@ jobs: cmake --build build windows-2022-cmake-cuda: + if: false + runs-on: windows-2022 strategy: @@ -1034,6 +1075,8 @@ jobs: cmake --build build --config Release windows-latest-cmake-sycl: + if: false + runs-on: windows-2022 defaults: @@ -1067,6 +1110,8 @@ jobs: run: examples/sycl/win-build-sycl.bat windows-latest-cmake-hip: + if: false + if: ${{ github.event.inputs.create_release != 'true' }} runs-on: windows-2022 @@ -1124,6 +1169,8 @@ jobs: cmake --build build -j ${env:NUMBER_OF_PROCESSORS} ios-xcode-build: + if: false + runs-on: macos-latest steps: @@ -1156,6 +1203,8 @@ jobs: run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build android-build: + if: false + runs-on: ubuntu-latest steps: @@ -1185,6 +1234,8 @@ jobs: ./gradlew build --no-daemon openEuler-latest-cmake-cann: + if: false + if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} defaults: run: From 0512d6631668f5ec449dc15c9fe7f334b99e5fee Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 10:27:53 -0700 Subject: [PATCH 18/24] Remove extra if --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b6ffdd2bd01bb..64b1c27a7454c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1112,7 +1112,7 @@ jobs: windows-latest-cmake-hip: if: false - if: ${{ github.event.inputs.create_release != 'true' }} + #if: ${{ github.event.inputs.create_release != 'true' }} runs-on: windows-2022 steps: @@ -1236,7 +1236,7 @@ jobs: openEuler-latest-cmake-cann: if: false - if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} + #if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} defaults: run: shell: bash -el {0} From 9335adf51db7f4ba7ec1d6f1ef1de836388d2733 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 10:51:55 -0700 Subject: [PATCH 19/24] Try more locking --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 5c53d21ab6c05..0e6d766baab81 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -118,8 +118,6 @@ struct webgpu_context_struct { wgpu::Limits limits; std::recursive_mutex mutex; - std::mutex get_tensor_mutex; - std::mutex init_mutex; bool device_init = false; @@ -228,10 +226,19 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { WEBGPU_LOG_DEBUG("ggml_backend_webgpu_wait_on_submission()"); std::lock_guard lock(ctx->mutex); if (ctx->callback_futures.empty()) { - return; + // no existing callbacks, wait on queue submission + ctx->queue.OnSubmittedWorkDone( + wgpu::CallbackMode::AllowSpontaneous, + [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { + if (status != wgpu::QueueWorkDoneStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data); + } + }); + } else { + // existing callbacks, wait on them + ctx->instance.WaitAny(ctx->callback_futures.size(), 
ctx->callback_futures.data(), UINT64_MAX); + ctx->callback_futures.clear(); } - ctx->instance.WaitAny(ctx->callback_futures.size(), ctx->callback_futures.data(), UINT64_MAX); - ctx->callback_futures.clear(); } static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { @@ -576,6 +583,8 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; + std::lock_guard lock(webgpu_ctx->mutex); + webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size / 4) * 4); if (size % 4 != 0) { @@ -592,6 +601,8 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_backend_webgpu_buffer_memset( webgpu_ctx, buf_ctx->buffer, val32, total_offset + (size - remaining_size), remaining_size); } + + ggml_backend_webgpu_wait_on_submission(webgpu_ctx); } static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, @@ -614,7 +625,7 @@ static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t buffer, final_size = size + (4 - (size % 4)); } - std::lock_guard lock(webgpu_ctx->get_tensor_mutex); + std::lock_guard lock(webgpu_ctx->mutex); if (webgpu_ctx->get_tensor_staging_buf == nullptr || webgpu_ctx->get_tensor_staging_buf.GetSize() < final_size) { // Create a new staging buffer if it doesn't exist or is too small @@ -780,7 +791,7 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co webgpu_context webgpu_ctx = dev_ctx->webgpu_ctx; // Multiple threads may try to initialize the device - std::lock_guard lock(webgpu_ctx->init_mutex); + std::lock_guard lock(webgpu_ctx->mutex); if (!webgpu_ctx->device_init) { // Initialize device std::vector required_features = { wgpu::FeatureName::ShaderF16, wgpu::FeatureName::ImplicitDeviceSynchronization }; From fc9e99dcd04aea888a104acc0cf8e2e8c3df55c2 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 10:55:43 -0700 Subject: [PATCH 20/24] maybe passes? 
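With the extra locking from the previous commit, the WebGPU job looks like it
passes; keep the tmate session unconditional for one more run to confirm
before cleaning up.

For reference, the wait path these commits are converging on is roughly the
following — a simplified sketch of ggml_backend_webgpu_wait_on_submission,
not a drop-in replacement (the WaitAny wrapped around the no-futures branch
only lands in the cleanup commit below):

    static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) {
        std::lock_guard<std::recursive_mutex> lock(ctx->mutex);
        if (ctx->callback_futures.empty()) {
            // No tracked futures: block directly on queue completion.
            ctx->instance.WaitAny(
                ctx->queue.OnSubmittedWorkDone(
                    wgpu::CallbackMode::AllowSpontaneous,
                    [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) {
                        if (status != wgpu::QueueWorkDoneStatus::Success) {
                            GGML_LOG_ERROR("ggml_webgpu: queue wait failed: %s\n", message.data);
                        }
                    }),
                UINT64_MAX);
        } else {
            // Otherwise wait on every tracked future, then clear the list.
            ctx->instance.WaitAny(ctx->callback_futures.size(), ctx->callback_futures.data(), UINT64_MAX);
            ctx->callback_futures.clear();
        }
    }
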
--- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 64b1c27a7454c..039c43ff349ca 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -458,7 +458,7 @@ jobs: ctest -L main --verbose --timeout 3600 - name: Debugging with tmate - if: ${{ failure() }} + #if: ${{ failure() }} uses: mxschmitt/action-tmate@v3.22 ubuntu-22-cmake-hip: From 7d9807eb8dc9234ae52469abff225b03d9459abb Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 11:12:50 -0700 Subject: [PATCH 21/24] test --- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 0e6d766baab81..9d9a33a6ef71b 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -26,7 +26,7 @@ /* Constants */ -#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 1 +#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 16 #define WEBGPU_MUL_MAT_WG_SIZE 64 #define WEBGPU_NUM_PARAM_BUFS 100 #define WEBGPU_PARAMS_BUF_SIZE_BYTES 256 @@ -223,7 +223,6 @@ static void ggml_webgpu_create_buffer(wgpu::Device & device, // Wait for the queue to finish processing all submitted work static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { - WEBGPU_LOG_DEBUG("ggml_backend_webgpu_wait_on_submission()"); std::lock_guard lock(ctx->mutex); if (ctx->callback_futures.empty()) { // no existing callbacks, wait on queue submission @@ -341,7 +340,6 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & ggml_backend_webgpu_submit_queue(ctx); } } - ggml_backend_webgpu_wait_on_submission(ctx); } static void ggml_backend_webgpu_buffer_memset(webgpu_context & ctx, @@ -356,6 +354,7 @@ static void ggml_backend_webgpu_buffer_memset(webgpu_context & ctx, size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread; uint32_t wg_x = ((size + 3) + bytes_per_wg - 1) / bytes_per_wg; ggml_backend_webgpu_build_and_enqueue(ctx, ctx->memset_pipeline, params, entries, wg_x, true); + ggml_backend_webgpu_wait_on_submission(ctx); } static size_t ggml_backend_webgpu_tensor_offset(const ggml_tensor * tensor) { @@ -385,8 +384,6 @@ static void ggml_backend_webgpu_free(ggml_backend_t backend) { } static void ggml_webgpu_cpy(webgpu_context & ctx, ggml_tensor * src, ggml_tensor * dst) { - WEBGPU_LOG_DEBUG("ggml_webgpu_cpy(" << src << ", " << dst << ")"); - size_t src_offset = ggml_backend_webgpu_tensor_offset(src); // assumes power of 2 offset alignment size_t src_misalignment = src_offset & (ctx->limits.minStorageBufferOffsetAlignment - 1); @@ -583,8 +580,6 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, size_t total_offset = webgpu_tensor_offset(tensor) + tensor->view_offs + offset; - std::lock_guard lock(webgpu_ctx->mutex); - webgpu_ctx->queue.WriteBuffer(buf_ctx->buffer, total_offset, data, (size / 4) * 4); if (size % 4 != 0) { @@ -600,9 +595,10 @@ static void ggml_backend_webgpu_buffer_set_tensor(ggml_backend_buffer_t buffer, // memset the remaining bytes ggml_backend_webgpu_buffer_memset( webgpu_ctx, buf_ctx->buffer, val32, total_offset + (size - remaining_size), remaining_size); + } else { + // wait for WriteBuffer to complete + ggml_backend_webgpu_wait_on_submission(webgpu_ctx); } - - ggml_backend_webgpu_wait_on_submission(webgpu_ctx); } static void ggml_backend_webgpu_buffer_get_tensor(ggml_backend_buffer_t 
buffer, From f7745c424fb759d3ad8244a1b7cb9bedf67c115d Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 11:23:23 -0700 Subject: [PATCH 22/24] Some cleanups --- .github/workflows/build.yml | 2 +- ggml/src/ggml-webgpu/ggml-webgpu.cpp | 45 ++++++++++++++-------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 039c43ff349ca..b2bf018def963 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -446,7 +446,7 @@ jobs: id: cmake_build run: | export Dawn_DIR=dawn/lib64/cmake/Dawn - cmake -B build -DGGML_WEBGPU=ON -DGGML_WEBGPU_DEBUG=ON + cmake -B build -DGGML_WEBGPU=ON cmake --build build --config Release -j $(nproc) - name: Test diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp index 9d9a33a6ef71b..5009e26a2065e 100644 --- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp +++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp @@ -226,13 +226,14 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) { std::lock_guard lock(ctx->mutex); if (ctx->callback_futures.empty()) { // no existing callbacks, wait on queue submission - ctx->queue.OnSubmittedWorkDone( - wgpu::CallbackMode::AllowSpontaneous, - [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { - if (status != wgpu::QueueWorkDoneStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data); - } - }); + ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone( + wgpu::CallbackMode::AllowSpontaneous, + [](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { + if (status != wgpu::QueueWorkDoneStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data); + } + }), + UINT64_MAX); } else { // existing callbacks, wait on them ctx->instance.WaitAny(ctx->callback_futures.size(), ctx->callback_futures.data(), UINT64_MAX); @@ -250,6 +251,7 @@ static void ggml_backend_webgpu_submit_queue(webgpu_context & ctx) { ctx->queue.Submit(ctx->staged_command_bufs.size(), ctx->staged_command_bufs.data()); ctx->staged_command_bufs.clear(); std::vector staged_param_bufs = std::move(ctx->staged_param_bufs); + // Free the staged parameter buffers once the submission completes wgpu::Future f = ctx->queue.OnSubmittedWorkDone( wgpu::CallbackMode::AllowSpontaneous, @@ -286,7 +288,7 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & std::vector params, std::vector bind_group_entries, uint32_t wg_x, - bool submit_imm = false) { + bool submit_and_wait = false) { webgpu_param_bufs params_bufs = ctx->param_buf_pool.alloc_bufs(); ggml_backend_webgpu_map_buffer(ctx, params_bufs.host_buf, wgpu::MapMode::Write, 0, params_bufs.host_buf.GetSize()); @@ -317,19 +319,18 @@ static void ggml_backend_webgpu_build_and_enqueue(webgpu_context & pass.DispatchWorkgroups(wg_x, 1, 1); pass.End(); wgpu::CommandBuffer commands = encoder.Finish(); - if (submit_imm) { - // Submit immediately + if (submit_and_wait) { + // Submit and wait immediately ctx->queue.Submit(1, &commands); - wgpu::Future f = ctx->queue.OnSubmittedWorkDone(wgpu::CallbackMode::AllowSpontaneous, - [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { - if (status != wgpu::QueueWorkDoneStatus::Success) { - GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", - message.data); - } - ctx->param_buf_pool.free_bufs({ params_bufs }); - }); - std::lock_guard lock(ctx->mutex); - ctx->callback_futures.push_back({ f }); + 
ctx->instance.WaitAny(ctx->queue.OnSubmittedWorkDone( + wgpu::CallbackMode::AllowSpontaneous, + [ctx, params_bufs](wgpu::QueueWorkDoneStatus status, wgpu::StringView message) { + if (status != wgpu::QueueWorkDoneStatus::Success) { + GGML_LOG_ERROR("ggml_webgpu: Failed to submit commands: %s\n", message.data); + } + ctx->param_buf_pool.free_bufs({ params_bufs }); + }), + UINT64_MAX); } else { // Lock the context mutex when pushing to the staging vectors. std::lock_guard lock(ctx->mutex); @@ -354,7 +355,6 @@ static void ggml_backend_webgpu_buffer_memset(webgpu_context & ctx, size_t bytes_per_wg = ctx->limits.maxComputeWorkgroupSizeX * ctx->memset_bytes_per_thread; uint32_t wg_x = ((size + 3) + bytes_per_wg - 1) / bytes_per_wg; ggml_backend_webgpu_build_and_enqueue(ctx, ctx->memset_pipeline, params, entries, wg_x, true); - ggml_backend_webgpu_wait_on_submission(ctx); } static size_t ggml_backend_webgpu_tensor_offset(const ggml_tensor * tensor) { @@ -790,7 +790,8 @@ static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, co std::lock_guard lock(webgpu_ctx->mutex); if (!webgpu_ctx->device_init) { // Initialize device - std::vector required_features = { wgpu::FeatureName::ShaderF16, wgpu::FeatureName::ImplicitDeviceSynchronization }; + std::vector required_features = { wgpu::FeatureName::ShaderF16, + wgpu::FeatureName::ImplicitDeviceSynchronization }; wgpu::DeviceDescriptor dev_desc; dev_desc.requiredLimits = &webgpu_ctx->limits; dev_desc.requiredFeatures = required_features.data(); From 4dc409a7f401474369ba744aa3e8e85c8a39166e Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 11:28:18 -0700 Subject: [PATCH 23/24] Restore build file --- .github/workflows/build.yml | 59 ++----------------------------------- 1 file changed, 2 insertions(+), 57 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b2bf018def963..cbbd30c5784cd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -57,7 +57,6 @@ env: jobs: macOS-latest-cmake-arm64: - if: false runs-on: macos-14 steps: @@ -98,8 +97,6 @@ jobs: ctest -L 'main|curl' --verbose --timeout 900 macOS-latest-cmake-x64: - if: false - runs-on: macos-13 steps: @@ -188,8 +185,6 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-cpu-cmake: - if: false - strategy: matrix: include: @@ -243,8 +238,6 @@ jobs: ./bin/llama-cli -m stories260K.gguf -p "One day, Lily met a Shoggoth" -n 500 -c 256 ubuntu-latest-cmake-sanitizer: - if: false - runs-on: ubuntu-latest continue-on-error: true @@ -299,8 +292,6 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-latest-llguidance: - if: false - runs-on: ubuntu-latest steps: @@ -331,8 +322,6 @@ jobs: ctest -L main --verbose --timeout 900 ubuntu-latest-cmake-rpc: - if: false - runs-on: ubuntu-latest continue-on-error: true @@ -368,8 +357,6 @@ jobs: ctest -L main --verbose ubuntu-22-cmake-vulkan: - if: false - runs-on: ubuntu-22.04 steps: @@ -457,13 +444,7 @@ jobs: # This is using llvmpipe and runs slower than other backends ctest -L main --verbose --timeout 3600 - - name: Debugging with tmate - #if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.22 - ubuntu-22-cmake-hip: - if: false - runs-on: ubuntu-22.04 container: rocm/dev-ubuntu-22.04:6.0.2 @@ -504,8 +485,6 @@ jobs: cmake --build build2 --config Release -j $(nproc) ubuntu-22-cmake-musa: - if: false - runs-on: ubuntu-22.04 container: mthreads/musa:rc4.2.0-devel-ubuntu22.04-amd64 @@ -534,8 +513,6 @@ jobs: cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl: - 
if: false - runs-on: ubuntu-22.04 continue-on-error: true @@ -584,8 +561,6 @@ jobs: cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl-fp16: - if: false - runs-on: ubuntu-22.04 continue-on-error: true @@ -635,18 +610,12 @@ jobs: cmake --build build --config Release -j $(nproc) build-linux-cross: - if: false - uses: ./.github/workflows/build-linux-cross.yml build-cmake-pkg: - if: false - uses: ./.github/workflows/build-cmake-pkg.yml macOS-latest-cmake-ios: - if: false - runs-on: macos-latest steps: @@ -684,8 +653,6 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-cmake-tvos: - if: false - runs-on: macos-latest steps: @@ -723,8 +690,6 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-cmake-visionos: - if: false - runs-on: macos-latest steps: @@ -756,8 +721,6 @@ jobs: cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-swift: - if: false - runs-on: macos-latest strategy: @@ -802,8 +765,6 @@ jobs: ./build-xcframework.sh windows-msys2: - if: false - runs-on: windows-2025 strategy: @@ -854,8 +815,6 @@ jobs: cmake --build build --config ${{ matrix.build }} -j $(nproc) windows-latest-cmake: - if: false - runs-on: windows-2025 env: @@ -986,8 +945,6 @@ jobs: # & $sde -future -- ctest -L main -C Release --verbose --timeout 900 ubuntu-latest-cmake-cuda: - if: false - runs-on: ubuntu-latest container: nvidia/cuda:12.6.2-devel-ubuntu24.04 @@ -1021,8 +978,6 @@ jobs: cmake --build build windows-2022-cmake-cuda: - if: false - runs-on: windows-2022 strategy: @@ -1075,8 +1030,6 @@ jobs: cmake --build build --config Release windows-latest-cmake-sycl: - if: false - runs-on: windows-2022 defaults: @@ -1110,9 +1063,7 @@ jobs: run: examples/sycl/win-build-sycl.bat windows-latest-cmake-hip: - if: false - - #if: ${{ github.event.inputs.create_release != 'true' }} + if: ${{ github.event.inputs.create_release != 'true' }} runs-on: windows-2022 steps: @@ -1169,8 +1120,6 @@ jobs: cmake --build build -j ${env:NUMBER_OF_PROCESSORS} ios-xcode-build: - if: false - runs-on: macos-latest steps: @@ -1203,8 +1152,6 @@ jobs: run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build android-build: - if: false - runs-on: ubuntu-latest steps: @@ -1234,9 +1181,7 @@ jobs: ./gradlew build --no-daemon openEuler-latest-cmake-cann: - if: false - - #if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} + if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} defaults: run: shell: bash -el {0} From 3b81c997ba6fa0917008bc065283618b31e47606 Mon Sep 17 00:00:00 2001 From: Reese Levine Date: Tue, 5 Aug 2025 11:42:26 -0700 Subject: [PATCH 24/24] Remove extra testing branch ci --- .github/workflows/build.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cbbd30c5784cd..63e40c3586285 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,7 +5,6 @@ on: push: branches: - master - - webgpu-ci-debug paths: [ '.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml',