
Commit a014310

cuda : Enable CUDA Graph usage for Nemotron Nano v2 (NemotronH) (ggml-org#16328)
* Fix Nemotron Nano v2 9B not executing as CUDA Graph on NVIDIA GPUs
* fix to ensure test-backend-ops check passes
1 parent 35fb824 commit a014310
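
This commit lets the NemotronH (Nemotron Nano v2) decode graph be captured and replayed as a CUDA graph. With CUDA graphs, the kernels of a step are recorded once and then relaunched as a single unit, removing per-kernel launch overhead; any node the backend cannot express in a graph-compatible way forces a fallback to regular stream execution. As a point of reference, below is a minimal, self-contained sketch of the capture/replay pattern the backend relies on. It is illustrative only (the kernel, buffer names, and sizes are invented for the example) and is not code from this commit.

// Minimal CUDA graph capture/replay sketch (illustrative, not llama.cpp code).
#include <cuda_runtime.h>

__global__ void copy_f32(const float * src, float * dst, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        dst[i] = src[i];
    }
}

int main() {
    const int n = 1 << 20;
    float *src, *dst;
    cudaMalloc(&src, n * sizeof(float));
    cudaMalloc(&dst, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Record the work once into a graph instead of launching it eagerly.
    cudaGraph_t graph;
    cudaGraphExec_t graph_exec;
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    copy_f32<<<(n + 255) / 256, 256, 0, stream>>>(src, dst, n);
    cudaStreamEndCapture(stream, &graph);
    cudaGraphInstantiate(&graph_exec, graph, nullptr, nullptr, 0);

    // Replay the captured graph; in llama.cpp this replaces per-token kernel launches.
    for (int step = 0; step < 8; ++step) {
        cudaGraphLaunch(graph_exec, stream);
    }
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(graph_exec);
    cudaGraphDestroy(graph);
    cudaStreamDestroy(stream);
    cudaFree(src);
    cudaFree(dst);
    return 0;
}

In llama.cpp's CUDA backend, copy destinations that change between tokens are handled through a device-side pointer table rather than graph re-instantiation, which is what the cpy.cu change below preserves.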

File tree: 3 files changed, +20 -4 lines changed

ggml/src/ggml-cuda/cpy.cu (12 additions, 2 deletions)

@@ -329,7 +329,11 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
         } else
 #endif // GGML_USE_MUSA && GGML_MUSA_MUDNN_COPY
         {
-            CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream));
+            if (src0->type == GGML_TYPE_F32) {
+                ggml_cpy_flt_cuda<float, float> (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index);
+            } else {
+                CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream));
+            }
         }
     } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
         ggml_cpy_flt_cuda<float, float> (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index);

@@ -400,7 +404,13 @@ void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {

 void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
     if (src0->type == src1->type && ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
-        return nullptr;
+        // Prioritize CUDA graph compatibility over direct memory copy optimization.
+        // Using copy kernels here maintains graph indirection support, preventing performance regression from disabled CUDA graphs.
+        if (src0->type == GGML_TYPE_F32) {
+            return (void*) cpy_flt<cpy_1_flt<float, float>>;
+        } else {
+            return nullptr;
+        }
     } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
         return (void*) cpy_flt<cpy_1_flt<float, float>>;
     } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) {
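
Why the cpy.cu change matters: for same-type contiguous tensors the old code returned nullptr from ggml_cuda_cpy_fn and fell back to a plain cudaMemcpyAsync in ggml_cuda_cpy. A raw memcpy cannot pick up a new destination through ggml's indirection table, so graphs containing such a copy lose CUDA graph compatibility; routing the F32 case through the ggml_cpy_flt_cuda kernel keeps the copy updatable via the dest_ptrs_d / graph_cpynode_index arguments visible in the diff. The snippet below is a hedged sketch of that indirection idea with invented names (copy_f32_indirect, dest_table, slot); it is not ggml's actual kernel.

// Sketch of pointer indirection for graph-friendly copies (illustrative names, not ggml's).
// The kernel fetches its destination from a device-resident table, so the host can point a
// captured copy node at a new buffer by rewriting the table instead of re-capturing the graph.
__global__ void copy_f32_indirect(const float * src, float ** dest_table, int slot, int n) {
    float * dst = dest_table[slot];                    // resolved at run time, on every replay
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        dst[i] = src[i];
    }
}

Between graph launches the host only rewrites one entry of dest_table; the instantiated graph itself never changes.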

ggml/src/ggml-cuda/ggml-cuda.cu (5 additions, 1 deletion)

@@ -2641,6 +2641,8 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud
     const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased";
     const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased";
     const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased";
+    const std::string nemotron_h_block_out_prefix = "nemotron_h_block_out";
+    const std::string mamba2_y_add_d_prefix = "mamba2_y_add_d";

     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];

@@ -2669,7 +2671,9 @@
             (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) &&
             strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 &&
             strncmp(node->name, ffn_moe_up_bias_prefix.c_str(), ffn_moe_up_bias_prefix.size()) != 0 &&
-            strncmp(node->name, ffn_moe_down_bias_prefix.c_str(), ffn_moe_down_bias_prefix.size()) != 0) {
+            strncmp(node->name, ffn_moe_down_bias_prefix.c_str(), ffn_moe_down_bias_prefix.size()) != 0 &&
+            strncmp(node->name, nemotron_h_block_out_prefix.c_str(), nemotron_h_block_out_prefix.size()) != 0 &&
+            strncmp(node->name, mamba2_y_add_d_prefix.c_str(), mamba2_y_add_d_prefix.size()) != 0) {
             // disable CUDA graphs for batch size > 1 for now while excluding the matrix-matrix addition as part of Gemma3n's `project_per_layer_input` operation
             // by means of matching node names. See
             // https://github.com/ggml-org/llama.cpp/blob/f9a31eea06a859e34cecb88b4d020c7f03d86cc4/src/llama-model.cpp#L10199-L10241 and
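
check_node_graph_compatibility_and_refresh_copy_ops only keeps CUDA graphs enabled for these batched ADD nodes when their names match a known-safe prefix; this commit whitelists the two names introduced in src/llama-model.cpp below. Prefix matching with strncmp is used because the graph callbacks typically append a per-layer index to the name, so every layer produces a distinct string. The following is a hedged, self-contained sketch of that check with an invented helper name (node_name_has_allowed_prefix), not the backend's actual code.

// Hedged sketch of the prefix-whitelist idea used above (illustrative, not ggml-cuda code).
// Node names carry a per-layer suffix (e.g. "nemotron_h_block_out-14"), so an exact string
// compare would miss them; strncmp against the bare prefix matches every layer.
#include <cstring>

static bool node_name_has_allowed_prefix(const char * name) {
    static const char * allowed_prefixes[] = {
        "ffn_moe_gate_biased",
        "ffn_moe_up_biased",
        "ffn_moe_down_biased",
        "nemotron_h_block_out",   // added by this commit
        "mamba2_y_add_d",         // added by this commit
    };
    for (const char * prefix : allowed_prefixes) {
        if (strncmp(name, prefix, strlen(prefix)) == 0) {
            return true;
        }
    }
    return false;
}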

src/llama-model.cpp (3 additions, 1 deletion)

@@ -11751,6 +11751,7 @@ struct llm_graph_context_mamba : public llm_graph_context {
         // TODO: skip computing output earlier for unused tokens

         y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
+        cb(y, "mamba2_y_add_d", il);
         y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y);

         // grouped RMS norm

@@ -14705,6 +14706,7 @@ struct llm_build_nemotron_h : public llm_graph_context_mamba {
         ggml_tensor * inpL;

         inpL = build_inp_embd(model.tok_embd);
+        ggml_build_forward_expand(gf, inpL);

         auto * inp = build_inp_mem_hybrid();

@@ -14736,7 +14738,7 @@ struct llm_build_nemotron_h : public llm_graph_context_mamba {

             // add residual
             cur = ggml_add(ctx0, cur, inpSA);
-            cb(cur, "block_out", il);
+            cb(cur, "nemotron_h_block_out", il);

             // input for next layer
             inpL = cur;
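
On the model side, the commit names the two additions the CUDA backend now recognizes (mamba2_y_add_d for the Mamba2 y + x*D term, and nemotron_h_block_out for the residual add, previously just block_out) and expands the input embedding into the graph up front via ggml_build_forward_expand. For completeness, here is a small sketch of how a layer-indexed tensor name of that form can be produced with the public ggml API; the actual callback used by llama.cpp lives in llm_graph_context and may differ, so treat the naming call as an assumption.

// Hedged sketch: producing a layer-indexed tensor name like the ones matched by the backend.
// Assumes ggml.h is on the include path; the tensor and sizes are invented for the example.
#include "ggml.h"

int main() {
    ggml_init_params params = { /*mem_size*/ 16 * 1024 * 1024, /*mem_buffer*/ nullptr, /*no_alloc*/ false };
    ggml_context * ctx = ggml_init(params);

    ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    const int il = 12;                                              // layer index
    ggml_format_name(cur, "%s-%d", "nemotron_h_block_out", il);    // -> "nemotron_h_block_out-12"

    ggml_free(ctx);
    return 0;
}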
