From eaa165b0f4c0d64b5f2e1c6605ded8cec71526c0 Mon Sep 17 00:00:00 2001
From: SS-JIA
Date: Mon, 18 Aug 2025 11:16:30 -0400
Subject: [PATCH] [ET-VK] Allocate memory for weight and activation tensors lazily

Summary:
* Allocate memory for weight tensors right before the prepacking shader is dispatched, rather than while building the graph
* Move allocation of shared objects (i.e. memory for intermediate tensors) to occur after prepacking

## Motivation

Prevent screen blackout (Llama 3.2 1B) / device crash (Llama 3.2 3B) when running Llama 3.2 models on the Samsung Galaxy S24. This behaviour is related to high peak memory usage while loading the model.

## Full Context

During model loading, the Vulkan delegate needs to store 3 copies of the constant data in memory at various points:

* source data obtained from loading the model
* staging buffer
* GPU texture/buffer

The general rationale of this change is to allocate memory for each copy only when necessary, in order to minimize the "overlap" during which all 3 copies exist at once.

### Current order of operations

Legend:

* `W` represents total weight nbytes
* `w` represents weight nbytes for one tensor
* `A` represents total activation nbytes
* `M` represents an approximation of the total memory footprint

First, the model file is loaded.

Then, while building the compute graph, for each weight tensor:

1. Weight data is loaded from the NamedDataMap (`M = W`)
2. The GPU texture/buffer for the weight is initialized and its memory is allocated (`M = 2W`)
3. After building the graph, `graph->prepare()` is called, which currently also allocates memory for the activation tensors (`M = 2W + A`)

Then, during the prepacking stage, each weight tensor is copied individually:

1. A staging buffer is initialized (`M = 2W + A + w`)
2. CPU weight data is copied to staging, then the CPU copy is freed (`M = 2W + A`)
3. A compute shader is dispatched to copy staging to the GPU texture/buffer, then the staging buffer is freed (`M = 2W + A - w`)

Peak memory usage in mainline is therefore `M = 2W + A + w`.

### Revised order of operations

This change revises the order of operations:

1. Weight data is loaded from the NamedDataMap (`M = W`)
2. The GPU texture/buffer for the weight is initialized, but **memory is not allocated** (`M = W`)

Then, during the prepacking stage, each weight tensor is copied individually:

1. A staging buffer is initialized (`M = W + w`)
2. **Memory is allocated for the GPU texture/buffer** (`M = W + 2w`)
3. CPU weight data is copied to staging, then the CPU copy is freed (`M = W + w`)
4. A compute shader is dispatched to copy staging to the GPU texture/buffer, then the staging buffer is freed (`M = W`)

**Only after all prepacking operations complete is activation memory allocated** (`M = W + A`).

Under this scheme, peak memory is reduced to `M = W + A` (or `M = W + 2w` if `2w > A`), which is at, or at least very close to, the theoretical minimum.
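To make the savings concrete, here is a worked example with hypothetical round numbers (for illustration only; these are not measured values): suppose `W = 2000 MB`, `A = 400 MB`, and the largest individual weight tensor has `w = 50 MB`. The current flow peaks at `2W + A + w = 4450 MB`, whereas the revised flow peaks at `max(W + A, W + 2w) = max(2400 MB, 2100 MB) = 2400 MB`, roughly 2 GB lower at the point of highest memory pressure.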
Test Plan:
## Logging Memory Usage

Using the following helpers to log the memory footprint at various points of inference while running the llama_runner binary with Llama 3.2 1B, we can compare the memory footprint with and without these changes:

```
#include <cstdint>
#include <fstream>
#include <string>

uint64_t getVmRssInKB() {
  std::ifstream statusFile("/proc/self/status");
  std::string l, num;
  while (std::getline(statusFile, l)) {
    if (l.substr(0, 5) == "VmRSS") {
      size_t pos = l.find_first_of("0123456789");
      num = l.substr(pos);
      break;
    }
  }
  uint64_t vmRssInKB = std::stoi(num);
  return vmRssInKB;
}

uint64_t getVmaStatsInKB() {
  auto stats =
      vkcompute::api::context()->adapter_ptr()->vma().get_memory_statistics();
  uint64_t vmaBlockInKB = stats.total.statistics.blockBytes >> 10;
  return vmaBlockInKB;
}
```

With changes: P1908051860 (Meta only)

```
Memory usage before model compilation: 1115760 KB (VmRSS), 0 KB (VMA)
Memory usage after graph building: 1924832 KB (VmRSS), 17920 KB (VMA)
Memory usage after graph preparation: 1935312 KB (VmRSS), 17920 KB (VMA)
Memory usage prepack start: 1935312 KB, VMA Block: 17920 KB
Memory usage after prepack operations: 1372376 KB (VmRSS), 2330528 KB (VMA)
Memory usage before execute: 1372804 KB (VmRSS), 2330528 KB (VMA)
Memory usage at end of execute: 1376916 KB (VmRSS), 2330528 KB (VMA)
```

Without changes: P1908054759 (Meta only)

```
Memory usage before model compilation: 1114784 KB (VmRSS), 0 KB (VMA)
Memory usage after graph building: 1924432 KB (VmRSS), 962464 KB (VMA)
Memory usage after graph preparation: 1922916 KB (VmRSS), 2326432 KB (VMA)
Memory usage prepack start: 1922916 KB, VMA Block: 2326432 KB
Memory usage after prepack operations: 1359180 KB (VmRSS), 2330528 KB (VMA)
Memory usage before execute: 1359492 KB (VmRSS), 2330528 KB (VMA)
Memory usage at end of execute: 1363636 KB (VmRSS), 2330528 KB (VMA)
```

These logs show how the changes reduce peak memory: with the changes, the VMA footprint grows gradually as the model loads while VmRSS decreases correspondingly, whereas without the changes the VMA footprint already reaches its peak once the graph has been initialized. Visually, it can also be verified that the Samsung Galaxy S24's screen no longer blacks out while loading the model.
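For context, here is a minimal sketch of how these helpers could be wired into the runner to produce the log lines above. The `logMemoryUsage` wrapper and its call sites are illustrative assumptions, not the exact instrumentation used:

```
#include <cstdint>
#include <cstdio>

// Assumes getVmRssInKB() and getVmaStatsInKB() from the snippet above are
// available in the same translation unit.
void logMemoryUsage(const char* stage) {
  const uint64_t vmRssKB = getVmRssInKB(); // process resident set size, in KB
  const uint64_t vmaKB = getVmaStatsInKB(); // total VMA block memory, in KB
  std::printf(
      "Memory usage %s: %llu KB (VmRSS), %llu KB (VMA)\n",
      stage,
      static_cast<unsigned long long>(vmRssKB),
      static_cast<unsigned long long>(vmaKB));
}

// Hypothetical call sites in the runner's setup path:
//   logMemoryUsage("before model compilation");
//   logMemoryUsage("after graph building");
//   logMemoryUsage("after prepack operations");
```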
[ghstack-poisoned] --- .../vulkan/runtime/api/containers/Tensor.cpp | 22 ++++++++++++++ .../vulkan/runtime/api/containers/Tensor.h | 11 +++++++ .../vulkan/runtime/graph/ComputeGraph.cpp | 28 +++++++++++------ backends/vulkan/runtime/graph/ComputeGraph.h | 7 +++++ .../vulkan/runtime/graph/ops/PrepackNode.cpp | 4 +++ .../vulkan/runtime/vk_api/memory/Buffer.cpp | 17 +++++++++++ .../vulkan/runtime/vk_api/memory/Buffer.h | 22 +++++++++----- .../vulkan/runtime/vk_api/memory/Image.cpp | 24 +++++++++++++++ backends/vulkan/runtime/vk_api/memory/Image.h | 30 +++++++++---------- 9 files changed, 134 insertions(+), 31 deletions(-) diff --git a/backends/vulkan/runtime/api/containers/Tensor.cpp b/backends/vulkan/runtime/api/containers/Tensor.cpp index a3d9bd4aa34..6f7167c54fb 100644 --- a/backends/vulkan/runtime/api/containers/Tensor.cpp +++ b/backends/vulkan/runtime/api/containers/Tensor.cpp @@ -897,6 +897,16 @@ VkMemoryRequirements vTensor::get_memory_requirements() const { return {}; } +bool vTensor::memory_is_bound() const { + switch (storage_type()) { + case utils::kBuffer: + return storage_->buffer_.has_memory(); + case utils::kTexture2D: + case utils::kTexture3D: + return storage_->image_.has_memory(); + } +} + void vTensor::bind_allocation(const vkapi::Allocation& allocation) { switch (storage_type()) { case utils::kBuffer: @@ -909,6 +919,18 @@ void vTensor::bind_allocation(const vkapi::Allocation& allocation) { } } +void vTensor::acquire_allocation(vkapi::Allocation&& allocation) { + switch (storage_type()) { + case utils::kBuffer: + storage_->buffer_.acquire_allocation(std::move(allocation)); + break; + case utils::kTexture2D: + case utils::kTexture3D: + storage_->image_.acquire_allocation(std::move(allocation)); + break; + } +} + void vTensor::update_metadata() { numel_ = utils::multiply_integers(sizes_); strides_ = calculate_strides(sizes_, dim_order_); diff --git a/backends/vulkan/runtime/api/containers/Tensor.h b/backends/vulkan/runtime/api/containers/Tensor.h index 0e1a1526d88..bcca956e5ea 100644 --- a/backends/vulkan/runtime/api/containers/Tensor.h +++ b/backends/vulkan/runtime/api/containers/Tensor.h @@ -560,6 +560,12 @@ class vTensor final { */ VmaAllocationCreateInfo get_allocation_create_info() const; + /* + * Checks if the tensor's underlying buffer or image resource is bound to a + * memory allocation. 
+ */ + bool memory_is_bound() const; + /* * Return the VkMemoryRequirements of the underlying resource */ @@ -570,6 +576,11 @@ class vTensor final { */ void bind_allocation(const vkapi::Allocation& allocation); + /* + * Binds and acquires a rvalue memory allocation + */ + void acquire_allocation(vkapi::Allocation&& allocation); + private: /* * Assuming sizes, dim order, or axis mapping was modified, recompute all diff --git a/backends/vulkan/runtime/graph/ComputeGraph.cpp b/backends/vulkan/runtime/graph/ComputeGraph.cpp index 65ce71a9ce6..dac309e8350 100644 --- a/backends/vulkan/runtime/graph/ComputeGraph.cpp +++ b/backends/vulkan/runtime/graph/ComputeGraph.cpp @@ -356,8 +356,6 @@ ValueRef ComputeGraph::add_tensor( const utils::GPUMemoryLayout memory_layout, const int64_t shared_object_idx, const utils::AxisMapLayout axis_map_layout) { - bool allocate_memory = shared_object_idx < 0; - ValueRef idx(static_cast(values_.size())); check_no_active_value_ptrs(); values_.emplace_back(api::vTensor( @@ -366,10 +364,10 @@ ValueRef ComputeGraph::add_tensor( dtype, storage_type, memory_layout, - allocate_memory, + false, axis_map_layout)); - if (!allocate_memory) { + if (shared_object_idx >= 0) { get_shared_object(shared_object_idx).add_user(this, idx); } return idx; @@ -626,6 +624,17 @@ SharedObject& ComputeGraph::get_shared_object(const int64_t idx) { return shared_objects_.at(idx); } +void ComputeGraph::create_dedicated_allocation_for(const ValueRef idx) { + vTensorPtr tensor = get_tensor(idx); + if (!tensor->memory_is_bound()) { + VmaAllocationCreateInfo alloc_create_info = + context()->adapter_ptr()->vma().gpuonly_resource_create_info(); + tensor->acquire_allocation( + context()->adapter_ptr()->vma().create_allocation( + tensor->get_memory_requirements(), alloc_create_info)); + } +} + void ComputeGraph::update_descriptor_counts( const vkapi::ShaderInfo& shader_info, bool execute) { @@ -823,11 +832,6 @@ void ComputeGraph::prepare() { if (config_.enable_querypool) { context_->initialize_querypool(); } - - for (SharedObject& shared_object : shared_objects_) { - shared_object.allocate(this); - shared_object.bind_users(this); - } } void ComputeGraph::prepare_pipelines() { @@ -950,6 +954,12 @@ void ComputeGraph::prepack() { submit_current_cmd_and_wait(/*final_use=*/true); context_->flush(); staging_nbytes_in_cmd_ = 0; + + // Initialize allocations for intermediate tensors + for (SharedObject& shared_object : shared_objects_) { + shared_object.allocate(this); + shared_object.bind_users(this); + } } void ComputeGraph::execute() { diff --git a/backends/vulkan/runtime/graph/ComputeGraph.h b/backends/vulkan/runtime/graph/ComputeGraph.h index f594571f9a7..7686aa65025 100644 --- a/backends/vulkan/runtime/graph/ComputeGraph.h +++ b/backends/vulkan/runtime/graph/ComputeGraph.h @@ -827,6 +827,13 @@ class ComputeGraph final { SharedObject& get_shared_object(const int64_t idx); + /* + * Creates a dedicated memory allocation for a vTensor value, and have the + * tensor acquire the allocation object. If the tensor is already bound to a + * memory allocation, this function will be a no-op. 
+ */ + void create_dedicated_allocation_for(const ValueRef idx); + // // Graph Preparation // diff --git a/backends/vulkan/runtime/graph/ops/PrepackNode.cpp b/backends/vulkan/runtime/graph/ops/PrepackNode.cpp index 03df92292f8..62e1dc86f43 100644 --- a/backends/vulkan/runtime/graph/ops/PrepackNode.cpp +++ b/backends/vulkan/runtime/graph/ops/PrepackNode.cpp @@ -97,6 +97,10 @@ void PrepackNode::encode(ComputeGraph* graph) { } { + // If the vTensor is not yet bound to a memory allocation, create a new one + // and aquire it. + graph->create_dedicated_allocation_for(packed_); + vkapi::PipelineBarrier pipeline_barrier{}; vkapi::DescriptorSet descriptor_set = context->get_descriptor_set( shader_, local_workgroup_size_, spec_vars_, push_constants_offset); diff --git a/backends/vulkan/runtime/vk_api/memory/Buffer.cpp b/backends/vulkan/runtime/vk_api/memory/Buffer.cpp index 4f58e07b146..51fd5ce5684 100644 --- a/backends/vulkan/runtime/vk_api/memory/Buffer.cpp +++ b/backends/vulkan/runtime/vk_api/memory/Buffer.cpp @@ -136,6 +136,23 @@ VmaAllocationInfo VulkanBuffer::allocation_info() const { return info; } +void VulkanBuffer::bind_allocation_impl(const Allocation& memory) { + VK_CHECK_COND(!memory_, "Cannot bind an already bound allocation!"); + if (!is_copy_) { + VK_CHECK(vmaBindBufferMemory(allocator_, memory.allocation, handle_)); + } +} + +void VulkanBuffer::bind_allocation(const Allocation& memory) { + bind_allocation_impl(memory); + memory_.allocation = memory.allocation; +} + +void VulkanBuffer::acquire_allocation(Allocation&& memory) { + bind_allocation_impl(memory); + memory_ = std::move(memory); +} + VkMemoryRequirements VulkanBuffer::get_memory_requirements() const { VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(this->device(), handle_, &memory_requirements); diff --git a/backends/vulkan/runtime/vk_api/memory/Buffer.h b/backends/vulkan/runtime/vk_api/memory/Buffer.h index e1b441397b4..f3fe7d0b639 100644 --- a/backends/vulkan/runtime/vk_api/memory/Buffer.h +++ b/backends/vulkan/runtime/vk_api/memory/Buffer.h @@ -162,13 +162,21 @@ class VulkanBuffer final { return (handle_ == other.handle_) && is_copy_; } - inline void bind_allocation(const Allocation& memory) { - VK_CHECK_COND(!memory_, "Cannot bind an already bound allocation!"); - if (!is_copy_) { - VK_CHECK(vmaBindBufferMemory(allocator_, memory.allocation, handle_)); - } - memory_.allocation = memory.allocation; - } + private: + void bind_allocation_impl(const Allocation& memory); + + public: + /* + * Given a memory allocation, bind it to the underlying VkImage. The lifetime + * of the memory allocation is assumed to be managed externally. + */ + void bind_allocation(const Allocation& memory); + + /* + * Given a rvalue memory allocation, bind it to the underlying VkImage and + * also acquire ownership of the memory allocation. 
+ */ + void acquire_allocation(Allocation&& memory); VkMemoryRequirements get_memory_requirements() const; diff --git a/backends/vulkan/runtime/vk_api/memory/Image.cpp b/backends/vulkan/runtime/vk_api/memory/Image.cpp index da6ff76bccd..c34ab2d608f 100644 --- a/backends/vulkan/runtime/vk_api/memory/Image.cpp +++ b/backends/vulkan/runtime/vk_api/memory/Image.cpp @@ -319,6 +319,30 @@ void VulkanImage::create_image_view() { &(handles_.image_view))); } +void VulkanImage::bind_allocation_impl(const Allocation& memory) { + VK_CHECK_COND(!memory_, "Cannot bind an already bound allocation!"); + // To prevent multiple instances of binding the same VkImage to a memory + // block, do not actually bind memory if this VulkanImage is a copy. Assume + // that the original VulkanImage is responsible for binding the image. + if (!is_copy_) { + VK_CHECK(vmaBindImageMemory(allocator_, memory.allocation, handles_.image)); + } + + // Only create the image view if the image has been bound to memory + owns_view_ = true; + create_image_view(); +} + +void VulkanImage::bind_allocation(const Allocation& memory) { + bind_allocation_impl(memory); + memory_.allocation = memory.allocation; +} + +void VulkanImage::acquire_allocation(Allocation&& memory) { + bind_allocation_impl(memory); + memory_ = std::move(memory); +} + VkMemoryRequirements VulkanImage::get_memory_requirements() const { VkMemoryRequirements memory_requirements; vkGetImageMemoryRequirements( diff --git a/backends/vulkan/runtime/vk_api/memory/Image.h b/backends/vulkan/runtime/vk_api/memory/Image.h index 5bbdaf06b47..483b3e038bc 100644 --- a/backends/vulkan/runtime/vk_api/memory/Image.h +++ b/backends/vulkan/runtime/vk_api/memory/Image.h @@ -242,21 +242,21 @@ class VulkanImage final { return (handles_.image == other.handles_.image) && is_copy_; } - inline void bind_allocation(const Allocation& memory) { - VK_CHECK_COND(!memory_, "Cannot bind an already bound allocation!"); - // To prevent multiple instances of binding the same VkImage to a memory - // block, do not actually bind memory if this VulkanImage is a copy. Assume - // that the original VulkanImage is responsible for binding the image. - if (!is_copy_) { - VK_CHECK( - vmaBindImageMemory(allocator_, memory.allocation, handles_.image)); - } - memory_.allocation = memory.allocation; - - // Only create the image view if the image has been bound to memory - owns_view_ = true; - create_image_view(); - } + private: + void bind_allocation_impl(const Allocation& memory); + + public: + /* + * Given a memory allocation, bind it to the underlying VkImage. The lifetime + * of the memory allocation is assumed to be managed externally. + */ + void bind_allocation(const Allocation& memory); + + /* + * Given a rvalue memory allocation, bind it to the underlying VkImage and + * also acquire ownership of the memory allocation. + */ + void acquire_allocation(Allocation&& memory); VkMemoryRequirements get_memory_requirements() const;