From db0444f57200f4dc8735312e9fcd3d2f0fd3ca3b Mon Sep 17 00:00:00 2001
From: yangxiao
Date: Fri, 30 May 2025 00:23:47 +0800
Subject: [PATCH 01/10] 1. add "integrated" to ggml_cuda_device_info to
 distinguish integrated GPUs from discrete GPUs 2. adjust
 ggml_backend_cuda_device_supports_buft for this new feature

---
 ggml/src/ggml-cuda/common.cuh   |  1 +
 ggml/src/ggml-cuda/ggml-cuda.cu | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index df450b187889b..e1ce1d4cd1558 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -635,6 +635,7 @@ struct ggml_cuda_device_info {
         int     nsm;                // number of streaming multiprocessors
         size_t  smpb;               // max. shared memory per block
         size_t  smpbo;              // max. shared memory per block (with opt-in)
+        bool    integrated;         // device is integrated as opposed to discrete
         bool    vmm;                // virtual memory support
         size_t  vmm_granularity;    // granularity of virtual memory
         size_t  total_vram;
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index c442a64924303..464ee116e8a15 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -243,7 +243,7 @@ static ggml_cuda_device_info ggml_cuda_init() {
         info.default_tensor_split[id] = total_vram;
         total_vram += prop.totalGlobalMem;
-
+        info.devices[id].integrated= prop.integrated;
         info.devices[id].nsm = prop.multiProcessorCount;
         info.devices[id].smpb = prop.sharedMemPerBlock;
         info.devices[id].warp_size = prop.warpSize;
@@ -1065,6 +1065,10 @@ static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_
     GGML_UNUSED(buft);
 }
 
+static bool ggml_backend_buft_is_cuda_host(ggml_backend_buffer_type_t buft) {
+    return buft->iface.get_name == ggml_backend_cuda_host_buffer_type_name;
+}
+
 static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     CUDA_CHECK(cudaFreeHost(buffer->context));
 }
@@ -3263,7 +3267,13 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 }
 
 static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
-    return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev;
+    ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context;
+    const int integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
+    if(integrated){
+        return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) ||ggml_backend_buft_is_cuda_host(buft)) && buft->device == dev;
+    }else{
+        return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev;
+    }
 }
 
 static int64_t get_op_batch_size(const ggml_tensor * op) {
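
Note: the new field is populated from cudaDeviceProp::integrated, which is 1
for GPUs that share physical memory with the host (e.g. Jetson boards or
laptop iGPUs) and 0 for discrete GPUs with their own VRAM; that shared memory
is what makes it safe for supports_buft to additionally accept host (pinned)
buffers on such devices. A minimal standalone sketch of the same query,
assuming only the CUDA runtime (the device index 0 is arbitrary):

    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, /*device=*/0) != cudaSuccess) {
            fprintf(stderr, "no CUDA device found\n");
            return 1;
        }
        // integrated == 1: the GPU shares physical memory with the host,
        // so host-resident buffers are directly reachable from kernels
        printf("%s: integrated=%d\n", prop.name, prop.integrated);
        return 0;
    }
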
From 99cbf626b40ae28da7d2c50a2c4b0b248bb7687c Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 10:30:00 +0800
Subject: [PATCH 02/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adjusted code indentation

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 464ee116e8a15..60a44cca9b771 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -243,10 +243,10 @@ static ggml_cuda_device_info ggml_cuda_init() {
         info.default_tensor_split[id] = total_vram;
         total_vram += prop.totalGlobalMem;
-        info.devices[id].integrated= prop.integrated;
-        info.devices[id].nsm = prop.multiProcessorCount;
-        info.devices[id].smpb = prop.sharedMemPerBlock;
-        info.devices[id].warp_size = prop.warpSize;
+        info.devices[id].integrated = prop.integrated;
+        info.devices[id].nsm        = prop.multiProcessorCount;
+        info.devices[id].smpb       = prop.sharedMemPerBlock;
+        info.devices[id].warp_size  = prop.warpSize;
 
 #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
         info.devices[id].smpbo = prop.sharedMemPerBlock;

From eea49ab2fdb56d266a51a66c3d74a131121dc57e Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 10:31:32 +0800
Subject: [PATCH 03/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixed an incorrect variable type

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 60a44cca9b771..8eb7e9e93a827 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3268,7 +3268,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 
 static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
     ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context;
-    const int integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
+    const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
     if(integrated){
         return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) ||ggml_backend_buft_is_cuda_host(buft)) && buft->device == dev;
     }else{

From 075fae34645db7bbf6325db687c2a538ad05ae6d Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 10:32:27 +0800
Subject: [PATCH 04/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Simplified the conditional logic

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 8eb7e9e93a827..d2ac61921cdff 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3269,11 +3269,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
     ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context;
     const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
-    if(integrated){
-        return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) ||ggml_backend_buft_is_cuda_host(buft)) && buft->device == dev;
-    }else{
-        return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev;
-    }
+    return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) || (integrated && ggml_backend_buft_is_cuda_host(buft))) && buft->device == dev;
 }
 
 static int64_t get_op_batch_size(const ggml_tensor * op) {
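
Note: the collapse in PATCH 04 is purely mechanical; the single return gives
the same answer as the removed if/else for every combination of inputs. A
quick exhaustive check in plain C++ (the bool parameters are illustrative
stand-ins for the ggml_backend_buft_is_* predicates and the
buft->device == dev comparison, not ggml API):

    #include <cassert>

    // the removed if/else form
    static bool branchy(bool cuda, bool split, bool host, bool same_dev, bool integrated) {
        if (integrated) {
            return (cuda || split || host) && same_dev;
        } else {
            return (cuda || split) && same_dev;
        }
    }

    // the single-return form introduced by PATCH 04
    static bool collapsed(bool cuda, bool split, bool host, bool same_dev, bool integrated) {
        return (cuda || split || (integrated && host)) && same_dev;
    }

    int main() {
        for (int m = 0; m < 32; ++m) {
            const bool c = m & 1, s = m & 2, h = m & 4, d = m & 8, i = m & 16;
            assert(branchy(c, s, h, d, i) == collapsed(c, s, h, d, i));
        }
        return 0;
    }
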
From bd21613cf7a94e723fb86ad6d01b57fa9336f53a Mon Sep 17 00:00:00 2001
From: yangxiao
Date: Fri, 30 May 2025 14:54:53 +0800
Subject: [PATCH 05/10] add a host_buft assert for the integrated CUDA device
 case in evaluate_and_capture_cuda_graph()

---
 ggml/src/ggml-cuda/ggml-cuda.cu | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index d2ac61921cdff..b88407fc867d4 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2645,6 +2645,8 @@ static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) {
 
 static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph,
     bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) {
+    //flag used to determine whether it is an integrated_gpu
+    const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated;
 
     while (!graph_evaluated_or_captured) {
         // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph.
@@ -2663,7 +2665,7 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx
             if (node->src[j] != nullptr) {
                 assert(node->src[j]->buffer);
                 assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) ||
-                       ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft));
+                       ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft)));
             }
         }
 #endif

From 1959c243cbdfe1e202ce239daa223a6e6d9d8161 Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 19:18:42 +0800
Subject: [PATCH 06/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a defensive assert

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index b88407fc867d4..9f5db8e77e463 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -247,6 +247,7 @@ static ggml_cuda_device_info ggml_cuda_init() {
         info.devices[id].nsm        = prop.multiProcessorCount;
         info.devices[id].smpb       = prop.sharedMemPerBlock;
         info.devices[id].warp_size  = prop.warpSize;
+        GGML_ASSERT(!integrated || prop.canUseHostPointerForRegisteredMem);
 
 #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
         info.devices[id].smpbo = prop.sharedMemPerBlock;
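
Note on the asserted property: cudaDeviceProp::canUseHostPointerForRegisteredMem
means the device can access memory registered with cudaHostRegister at the
same virtual address the CPU uses, i.e. no separate device-side copy is
needed. A minimal sketch of that mechanism (error handling elided; assumes a
CUDA device is present). PATCH 08 below reverts the assert because, per its
commit message, it does not hold on Jetson devices even though they are
integrated:

    #include <cstdlib>
    #include <cuda_runtime.h>

    int main() {
        const size_t n = 1 << 20;
        void * host = malloc(n);
        // pin and map an ordinary allocation so the device can reach it
        cudaHostRegister(host, n, cudaHostRegisterMapped);
        void * dev = nullptr;
        // device-side alias of the registered range; on devices that report
        // canUseHostPointerForRegisteredMem it can be `host` itself
        cudaHostGetDevicePointer(&dev, host, 0);
        cudaHostUnregister(host);
        free(host);
        return 0;
    }
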
From 308886ab94236c972067a4e24cfa2279b3e10d5a Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 19:24:16 +0800
Subject: [PATCH 07/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adjusted the support-check logic

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 9f5db8e77e463..81632ae1ca494 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3272,7 +3272,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
     ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context;
     const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
-    return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) || (integrated && ggml_backend_buft_is_cuda_host(buft))) && buft->device == dev;
+    return ((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft)));
 }
 
 static int64_t get_op_batch_size(const ggml_tensor * op) {

From 63db683ce99be819634742d42e6b844669e350f7 Mon Sep 17 00:00:00 2001
From: yangxiao
Date: Fri, 30 May 2025 20:32:03 +0800
Subject: [PATCH 08/10] revert the suggested assert since it is not applicable
 on Jetson devices

---
 ggml/src/ggml-cuda/ggml-cuda.cu | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 81632ae1ca494..c9469d2a46cd4 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -247,7 +247,6 @@ static ggml_cuda_device_info ggml_cuda_init() {
         info.devices[id].nsm        = prop.multiProcessorCount;
         info.devices[id].smpb       = prop.sharedMemPerBlock;
         info.devices[id].warp_size  = prop.warpSize;
-        GGML_ASSERT(!integrated || prop.canUseHostPointerForRegisteredMem);
 
 #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
         info.devices[id].smpbo = prop.sharedMemPerBlock;

From 73472d1e667768b7ff1e407837f1613f8e834873 Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 21:10:38 +0800
Subject: [PATCH 09/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add parentheses to enforce operator precedence

Co-authored-by: Diego Devesa
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index c9469d2a46cd4..046a58c1fc8ec 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3271,7 +3271,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
     ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context;
     const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated;
-    return ((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft)));
+    return (((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft)));
 }
 
 static int64_t get_op_batch_size(const ggml_tensor * op) {
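
Note: the bug fixed here is plain C++ operator precedence. && binds tighter
than ||, so the PATCH 07 expression of the form a || b && c parses as
a || (b && c), which left the buft->device == dev check applied only to the
split-buffer case. A two-assert demonstration (compilers typically warn about
the unparenthesized form, which is exactly the point):

    #include <cassert>

    int main() {
        const bool a = true, b = false, c = false;
        assert((a || b && c) == (a || (b && c)));  // how the compiler groups it
        assert((a || b && c) != ((a || b) && c));  // the grouping PATCH 09 makes explicit
        return 0;
    }
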
From 2ab5def600a9bbe8257141e1a656e7787f1b393b Mon Sep 17 00:00:00 2001
From: Shawn yang <137684499+Yangxiaoz@users.noreply.github.com>
Date: Fri, 30 May 2025 21:41:50 +0800
Subject: [PATCH 10/10] Update ggml/src/ggml-cuda/ggml-cuda.cu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix CI failure: add a missing space

Co-authored-by: Johannes Gäßler
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 046a58c1fc8ec..7285dfab008a0 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2645,7 +2645,7 @@ static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) {
 
 static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph,
     bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) {
-    //flag used to determine whether it is an integrated_gpu
+    // flag used to determine whether it is an integrated_gpu
     const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated;
 
     while (!graph_evaluated_or_captured) {
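
Closing note on the net behavior change: relative to the PATCH 04 logic,
PATCHes 07 and 09 accept a CUDA host buffer on an integrated device even when
buft->device != dev, which is consistent with pinned host memory not
belonging to any single GPU. An exhaustive comparison of the two predicates
(plain C++, the same illustrative stand-in names as before) prints exactly
the integrated-host combinations where the device check no longer applies:

    #include <cstdio>

    static bool patch04_form(bool cuda, bool split, bool host, bool same_dev, bool integrated) {
        return (cuda || split || (integrated && host)) && same_dev;
    }

    static bool final_form(bool cuda, bool split, bool host, bool same_dev, bool integrated) {
        return ((cuda || split) && same_dev) || (integrated && host);
    }

    int main() {
        for (int m = 0; m < 32; ++m) {
            const bool c = m & 1, s = m & 2, h = m & 4, d = m & 8, i = m & 16;
            if (patch04_form(c, s, h, d, i) != final_form(c, s, h, d, i)) {
                // differs only when integrated && host && !same_dev
                printf("cuda=%d split=%d host=%d same_dev=%d integrated=%d\n",
                       (int)c, (int)s, (int)h, (int)d, (int)i);
            }
        }
        return 0;
    }
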