@@ -3037,7 +3037,7 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
     int64_t interval = max_kernel_size;
 
     int64_t left_pad_len = dilationVal[0] * (max_kernel_size - 1) + 1 - 2 * paddingVal[0];
-    int64_t right_pad_len = 0; 
+    int64_t right_pad_len = 0;
 
     acl_scalar_ptr alpha = nullptr;
     float alphaValue = 1.0;
@@ -3084,7 +3084,7 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
             part_dst_ne[i] = *(dst->ne + i);
         }
         part_dst_ne[0] = (input_len - 1) * strideVal[0] - 2 * paddingVal[0] + dilationVal[0] * (part_ne[0] - 1) + 1;
-        
+
         size_t part_dst_nb[4];
         part_dst_nb[0] = sizeof(weight_type);
         for (int i = 1; i < 4; i++) {
@@ -3120,13 +3120,13 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
         for (int i = 0; i < 4; i++){
            conv_result_ne[i] = *(dst->ne + i);
         }
-        
+
         size_t conv_result_nb[4];
         conv_result_nb[0] = sizeof(weight_type);
         for (int i = 1; i < 4; i++) {
             conv_result_nb[i] = conv_result_nb[i - 1] * conv_result_ne[i - 1];
         }
-        
+
         ggml_cann_pool_alloc conv_result_allocator;
         conv_result_allocator.alloc(ctx.pool(), conv_result_nb[3]);
         void * conv_result_buf = conv_result_allocator.get();
@@ -3136,7 +3136,7 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
 
         GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, conv_result.get());
         GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_part_dst.get(), padData.get(), pad_value.get(), conv_result.get());
-        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), conv_result.get(), alpha.get()); 
+        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), conv_result.get(), alpha.get());
     }
 }
 
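For reference, the context lines above size each partial destination with the standard 1-D transposed-convolution output-length formula, `(input_len - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1`. A minimal standalone sketch of that calculation, assuming the usual PyTorch-style conv_transpose1d semantics; the function name and example values below are illustrative and not taken from the CANN backend:

```cpp
#include <cstdint>
#include <cstdio>

// Output length of a 1D transposed convolution, matching the expression
// used for part_dst_ne[0] in the diff above (illustrative helper, not
// part of the ggml-cann code):
// (input_len - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1
static int64_t conv_transpose_1d_out_len(int64_t input_len, int64_t kernel_size,
                                         int64_t stride, int64_t padding,
                                         int64_t dilation) {
    return (input_len - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1;
}

int main() {
    // e.g. input of length 10, kernel 3, stride 2, no padding, dilation 1:
    // (10 - 1) * 2 - 0 + 1 * (3 - 1) + 1 = 21
    printf("%lld\n", (long long) conv_transpose_1d_out_len(10, 3, 2, 0, 1));
    return 0;
}
```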