@@ -3013,11 +3013,11 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
 
     int64_t strideVal[1];
     strideVal[0] = s0;
-    aclIntArray* stride = aclCreateIntArray(strideVal, 1);
+    acl_int_array_ptr stride = ggml_cann_create_int_array(strideVal, 1);
     int64_t paddingVal[] = {0};
-    aclIntArray* padding = aclCreateIntArray(paddingVal, 1);
+    acl_int_array_ptr padding = ggml_cann_create_int_array(paddingVal, 1);
     int64_t dilationVal[] = {1};
-    aclIntArray* dilation = aclCreateIntArray(dilationVal, 1);
+    acl_int_array_ptr dilation = ggml_cann_create_int_array(dilationVal, 1);
     bool transposed = true;
     int64_t groups = 1;
     int8_t cubeMathType = 0;
@@ -3039,12 +3039,12 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
     int64_t left_pad_len = dilationVal[0] * (max_kernel_size - 1) + 1 - 2 * paddingVal[0];
     int64_t right_pad_len = 0;
 
-    aclScalar* alpha = nullptr;
+    acl_scalar_ptr alpha = nullptr;
     float alphaValue = 1.0;
-    alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT);
+    alpha = ggml_cann_create_scalar(&alphaValue, aclDataType::ACL_FLOAT);
 
     // set zero to destination
-    GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_dst);
+    GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_dst.get());
 
     for (int k = 0; k < part_num; k++){
 
@@ -3076,7 +3076,7 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
         acl_tensor_ptr part_kernel = ggml_cann_create_tensor(part_kernel_buf, weight_type,
             ggml_element_size(src0), part_ne, part_nb, 3, ACL_FORMAT_NCL);
 
-        GGML_CANN_CALL_ACLNN_OP(ctx, Slice, acl_weight, slice_dim, slice_start, slice_end, slice_step, part_kernel);
+        GGML_CANN_CALL_ACLNN_OP(ctx, Slice, acl_weight.get(), slice_dim, slice_start, slice_end, slice_step, part_kernel.get());
 
         // create the part conv result tensor
         int64_t part_dst_ne[4];
@@ -3096,11 +3096,11 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
 
         acl_tensor_ptr acl_part_dst = ggml_cann_create_tensor(part_dst_buf, dst_type, ggml_element_size(dst),
             part_dst_ne, part_dst_nb, 3, ACL_FORMAT_NCL);
-        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_part_dst);
+        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, acl_part_dst.get());
 
         // compute part conv transpose 1d
-        GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_input, part_kernel, nullptr, stride,
-            padding, dilation, transposed, padding, groups, acl_part_dst, cubeMathType);
+        GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_input.get(), part_kernel.get(), nullptr, stride.get(),
+            padding.get(), dilation.get(), transposed, padding.get(), groups, acl_part_dst.get(), cubeMathType);
 
         // compute the position of part result in final result
         int64_t global_start = slice_start;
@@ -3110,11 +3110,11 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
         right_pad_len = dst_len - global_end;
 
         std::vector<int64_t> padDataVal = {left_pad_len,right_pad_len};
-        aclIntArray* padData = aclCreateIntArray(padDataVal.data(), 2);
+        acl_int_array_ptr padData = ggml_cann_create_int_array(padDataVal.data(), 2);
 
-        aclScalar* pad_value = nullptr;
+        acl_scalar_ptr pad_value = nullptr;
         float pad_valueVal = 0.0;
-        pad_value = aclCreateScalar(&pad_valueVal, aclDataType::ACL_FLOAT);
+        pad_value = ggml_cann_create_scalar(&pad_valueVal, aclDataType::ACL_FLOAT);
 
         int64_t conv_result_ne[4];
         for (int i = 0; i < 4; i++){
@@ -3134,9 +3134,9 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds
         acl_tensor_ptr conv_result = ggml_cann_create_tensor(conv_result_buf, dst_type, ggml_element_size(dst),
             conv_result_ne, conv_result_nb, 3, ACL_FORMAT_NCL);
 
-        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, conv_result);
-        GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_part_dst, padData, pad_value, conv_result);
-        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst, conv_result, alpha);
+        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, conv_result.get());
+        GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_part_dst.get(), padData.get(), pad_value.get(), conv_result.get());
+        GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_dst.get(), conv_result.get(), alpha.get());
     }
 }
 
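The hunks above only show the call sites; the definitions of acl_int_array_ptr, acl_scalar_ptr, and the ggml_cann_create_* helpers are not part of this diff. A minimal sketch of what such RAII wrappers could look like, assuming std::unique_ptr aliases with custom deleters that call the matching aclDestroy* functions (the header path, deleter names, and helper signatures here are assumptions, not taken from this commit):

// Sketch only: assumed RAII aliases for the ACL handles used above.
#include <cstdint>
#include <memory>

#include <aclnn/acl_meta.h>  // aclIntArray, aclScalar, aclCreate*/aclDestroy* (header name assumed; check the CANN SDK)

struct acl_int_array_deleter {
    void operator()(aclIntArray * arr) const {
        if (arr != nullptr) {
            aclDestroyIntArray(arr);  // release the ACL handle when the wrapper goes out of scope
        }
    }
};

struct acl_scalar_deleter {
    void operator()(aclScalar * scalar) const {
        if (scalar != nullptr) {
            aclDestroyScalar(scalar);  // release the ACL handle when the wrapper goes out of scope
        }
    }
};

using acl_int_array_ptr = std::unique_ptr<aclIntArray, acl_int_array_deleter>;
using acl_scalar_ptr    = std::unique_ptr<aclScalar,   acl_scalar_deleter>;

// Thin factories mirroring the call sites in the diff: the raw handle still comes
// from aclCreate*, but ownership moves into the smart pointer, which is why the
// ACLNN macro invocations now pass handle.get() instead of the raw pointer.
inline acl_int_array_ptr ggml_cann_create_int_array(const int64_t * value, uint64_t size) {
    return acl_int_array_ptr(aclCreateIntArray(value, size));
}

inline acl_scalar_ptr ggml_cann_create_scalar(void * value, aclDataType type) {
    return acl_scalar_ptr(aclCreateScalar(value, type));
}

With wrappers along these lines, the handles created once per call (stride, padding, dilation, alpha) and the handles created on every loop iteration (padData, pad_value) are released automatically when they go out of scope, rather than depending on explicit aclDestroy* calls on every code path; the .get() calls simply hand the non-owning raw pointer to the ACLNN operator macros.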