@@ -551,7 +551,7 @@ static void ggml_cpy_f16_f16_cuda(
         (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++);
 }
 
-void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1) {
+void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection_for_this_node) {
     const int64_t ne = ggml_nelements(src0);
     GGML_ASSERT(ne == ggml_nelements(src1));
 
@@ -588,7 +588,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
     char ** dest_ptrs_d = nullptr;
     int graph_cpynode_index = -1;
 #if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS)
-    if (ctx.cuda_graph->use_cpy_indirection) {
+    if (ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) {
         dest_ptrs_d = ctx.cuda_graph->dest_ptrs_d;
         graph_cpynode_index = ctx.cuda_graph->graph_cpynode_index;
     }
@@ -636,7 +636,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
             ggml_type_name(src0->type), ggml_type_name(src1->type));
     }
 #if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS)
-    if (ctx.cuda_graph->use_cpy_indirection) {
+    if (ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) {
         ctx.cuda_graph->graph_cpynode_index = graph_cpynode_index;
     }
 #endif
@@ -645,7 +645,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
 
 void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
-    ggml_cuda_cpy(ctx, src0, dst);
+    bool disable_indirection = true;
+    ggml_cuda_cpy(ctx, src0, dst, disable_indirection);
 }
 
 void * ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
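The change is an opt-out: `ggml_cuda_cpy` still routes copies through the CUDA-graph destination-pointer indirection (`dest_ptrs_d` / `graph_cpynode_index`) whenever `use_cpy_indirection` is set, but a caller can now disable that for a single node, and `ggml_cuda_dup` does exactly that for DUP. Since the other call sites of `ggml_cuda_cpy` are untouched by this diff, the matching declaration presumably gains a defaulted parameter; a minimal sketch of what that could look like in cpy.cuh (the header hunk is not shown here, so the default value is an assumption):

    // cpy.cuh -- sketch only, not part of the diff above. Defaulting the
    // new flag to false would keep every existing three-argument call site
    // compiling with unchanged behaviour.
    void ggml_cuda_cpy(ggml_backend_cuda_context & ctx,
                       const ggml_tensor * src0, ggml_tensor * src1,
                       bool disable_indirection_for_this_node = false);

With such a default, only DUP nodes bypass the indirection path, while plain CPY nodes captured into a CUDA graph keep updating their destinations through `dest_ptrs_d`.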