@@ -450,7 +450,7 @@ void ggml_cann_acc(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
 
     if (!inplace) {
         size_t cpy_size = ggml_nbytes(dst);
-        ggml_cann_async_memcpy(ctx, dst->data, src0->data, cpy_size,  
+        ggml_cann_async_memcpy(ctx, dst->data, src0->data, cpy_size,
             ACL_MEMCPY_DEVICE_TO_DEVICE);
         aclTensor* acl_src0 = ggml_cann_create_tensor(
             src0, src1->ne, src0->nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset);
@@ -991,7 +991,7 @@ static void ggml_cann_im2col_1d_post_process(
                              c * KH * KW * n_step_w * ggml_type_size(dst->type);
 
             for (int i = 0; i < n_step_w; i++) {
-                ggml_cann_async_memcpy(ctx, cur_dst_buffer, cur_permute_buffer, size_cpy,  
+                ggml_cann_async_memcpy(ctx, cur_dst_buffer, cur_permute_buffer, size_cpy,
                     ACL_MEMCPY_DEVICE_TO_DEVICE);
                 cur_dst_buffer =
                     (char *)cur_dst_buffer + KH * KW * ggml_type_size(dst->type);
@@ -1002,7 +1002,7 @@ static void ggml_cann_im2col_1d_post_process(
     } else {
         offset = KH * KW * n_step_w *
                  ggml_type_size(dst->type);  // equal to ggml_nbytes(dst)
-        ggml_cann_async_memcpy(ctx, dst->data, (char *)tmp_permute_buffer + offset, offset,  
+        ggml_cann_async_memcpy(ctx, dst->data, (char *)tmp_permute_buffer + offset, offset,
             ACL_MEMCPY_DEVICE_TO_DEVICE);
     }
 
@@ -1102,7 +1102,7 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
                                          tmp_im2col_tensor, im2col_op_params);
     }
 
-    ggml_cann_release_resources(ctx, acl_src1, tmp_im2col_tensor, tmp_cast_tensor,  
+    ggml_cann_release_resources(ctx, acl_src1, tmp_im2col_tensor, tmp_cast_tensor,
         kernel_size, dilations, paddings, strides);
 }
 
@@ -1239,7 +1239,7 @@ void ggml_cann_timestep_embedding(ggml_backend_cann_context& ctx,
 
     // release
     // segmentation fault when delete both tensorList and his elements.
-    ggml_cann_release_resources(ctx, tensor_list, acl_src, tmp_arange_tensor,  
+    ggml_cann_release_resources(ctx, tensor_list, acl_src, tmp_arange_tensor,
         tmp_permute_tensor, tmp_mul_tensor, acl_dst);
 }
 
@@ -1430,8 +1430,8 @@ static void aclnn_alibi(ggml_backend_cann_context& ctx, aclTensor* acl_src,
 
     // add
     aclnn_add(ctx, tmp_output_tensor, acl_src, acl_dst);
-    ggml_cann_release_resources(ctx, tmp_arange1_tensor, tmp_arange2_tensor,  
-        tmp_mk_base1_tensor, tmp_mk_base2_tensor, tmp_mk_base_tensor,  
+    ggml_cann_release_resources(ctx, tmp_arange1_tensor, tmp_arange2_tensor,
+        tmp_mk_base1_tensor, tmp_mk_base2_tensor, tmp_mk_base_tensor,
         tmp_arange_tensor, tmp_mk_tensor, tmp_output_tensor);
 }
 
@@ -1563,7 +1563,7 @@ void ggml_cann_softmax(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
         aclnn_softmax(ctx, acl_input_mul_scale_tensor, 3, acl_dst);
     }
 
-    ggml_cann_release_resources(ctx, acl_src0, acl_src1_fp32_tensor, acl_dst,  
+    ggml_cann_release_resources(ctx, acl_src0, acl_src1_fp32_tensor, acl_dst,
         acl_scale, acl_input_mul_scale_tensor, tmp_mask_tensor);
 }
 
@@ -2076,7 +2076,7 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst,
 
     // power
     aclScalar* acl_theta_scale = aclCreateScalar(&theta_scale, aclDataType::ACL_FLOAT);
-    GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, acl_theta_scale, acl_theta_scale_tensor,  
+    GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, acl_theta_scale, acl_theta_scale_tensor,
                             acl_theta_scale_tensor);
 
     // freq_scale
@@ -2159,7 +2159,7 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst,
     }
 
     // release
-    ggml_cann_release_resources(ctx, acl_theta_scale_tensor, acl_position_tensor,  
+    ggml_cann_release_resources(ctx, acl_theta_scale_tensor, acl_position_tensor,
         acl_theta_tensor, acl_sin_tensor, acl_cos_tensor, acl_theta_scale);
 }
 
@@ -2307,7 +2307,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
         int64_t shifts[] = {src0->ne[0] / 2};
         int64_t dims[] = {3};
         aclnn_roll(ctx, acl_input_tensor, acl_input_roll_tensor, shifts, dims);
-        
+
         ggml_cann_release_resources(ctx, acl_input_roll_tensor, acl_input_tensor);
         // init [-1, -1, -1, 1, 1,1,...]
         minus_one_scale_buffer = minus_one_scale_allocator.get();
@@ -2411,7 +2411,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
 
     switch (src0->type) {
         case GGML_TYPE_F32: {
-            GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src,  
+            GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src,
                 acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode, acl_dst);
             break;
         }
@@ -2438,8 +2438,8 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
 
             aclnn_cast(ctx, acl_src, acl_src_trans_tensor, ACL_FLOAT);
 
-            GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_trans_tensor,  
-                acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode,  
+            GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_trans_tensor,
+                acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode,
                 acl_dst_trans_tensor);
 
             aclnn_cast(ctx, acl_dst_trans_tensor, acl_dst, ACL_FLOAT16);
@@ -2452,7 +2452,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
             GGML_ABORT("Unsupported tensor type for GGML_OP_ROPE");
             break;
     }
-    ggml_cann_release_resources(ctx, acl_cos_reshape_tensor,  
+    ggml_cann_release_resources(ctx, acl_cos_reshape_tensor,
         acl_sin_reshape_tensor, acl_src, acl_dst);
 }
 