Skip to content

Commit 444958c

Browse files
Removed conflicts in op_quantize.cpp
1 parent b626b7f commit 444958c

File tree

1 file changed

+14
-39
lines changed

1 file changed

+14
-39
lines changed

backends/cadence/fusion_g3/operators/op_quantize.cpp

Lines changed: 14 additions & 39 deletions
Original file line number | Diff line number | Diff line change
@@ -10,50 +10,26 @@
1010
#include <cinttypes>
1111
#include <cmath>
1212

13+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
14+
1315
#include <xa_nnlib_kernels_api.h>
1416

15-
<<<<<<< HEAD
16-
<<<<<<< HEAD
17-
=======
1817
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
19-
>>>>>>> 897987e42 (copied XT_KERNEL_CHECK to a header file and included the header file in the operators at backends\cadence\fusion_f3\operators folder)
2018
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
2119
#include <executorch/runtime/kernel/kernel_includes.h>
2220

23-
<<<<<<< HEAD
24-
using ::executorch::aten::ArrayRef;
25-
using ::executorch::aten::optional;
21+
using ::executorch::aten::Scalar;
2622
using ::executorch::aten::ScalarType;
2723
using ::executorch::aten::Tensor;
2824
using ::executorch::runtime::Error;
2925
using ::executorch::runtime::KernelRuntimeContext;
30-
=======
31-
=======
32-
>>>>>>> d411838ef (added operators for sub, slice, permute, exp, mean, div. Disabled argument checks in the operators using macro)
33-
using exec_aten::Scalar;
34-
using exec_aten::ScalarType;
35-
using exec_aten::Tensor;
36-
using torch::executor::Error;
37-
using torch::executor::KernelRuntimeContext;
38-
<<<<<<< HEAD
39-
>>>>>>> 92b58ef81 (Resolved Linter errors)
40-
=======
41-
>>>>>>> d411838ef (added operators for sub, slice, permute, exp, mean, div. Disabled argument checks in the operators using macro)
4226

4327
/* ScalarType in Executorch do not have support for below data types.
4428
* So, creating a placeholder for these data types. Once, ScalarTypes is
4529
* updated to have support for below data types, these can be removed and
4630
* operator need to be updated accordingly
4731
*/
48-
<<<<<<< HEAD
4932
enum datatype { Ushort = 20, Bits4u = 21, Bits4 = 22 };
50-
=======
51-
enum datatype {
52-
Ushort = 20,
53-
Bits4u = 21,
54-
Bits4 = 22
55-
};
56-
>>>>>>> 92b58ef81 (Resolved Linter errors)
5733

5834
/**
5935
* For an input tensor, use the scale and zero_point arguments to quantize it.
@@ -172,7 +148,8 @@ Tensor& quantize_impl(
172148
int* axis,
173149
int quant_min,
174150
int quant_max) {
175-
const exec_aten::ArrayRef<Tensor::SizesType> input_size = input.sizes();
151+
const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size =
152+
input.sizes();
176153

177154
int kTensorDimensionLimit = 5;
178155

@@ -350,8 +327,9 @@ Tensor& quantize_impl(
350327
}
351328
}
352329

353-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
354-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
330+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
331+
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
332+
dims, size_t(input.dim() - 1)}};
355333

356334
// Actual quantization logic
357335
// input, out are the input and output tensors
@@ -554,8 +532,9 @@ Tensor& quantize_impl(
554532
}
555533
}
556534

557-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
558-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
535+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
536+
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
537+
dims, size_t(input.dim() - 1)}};
559538

560539
// Actual quantization logic
561540
// input, out are the input and output tensors
@@ -820,11 +799,11 @@ Tensor& quantize_per_token_out(
820799
Tensor reshaped_input = at::from_blob(
821800
input.mutable_data_ptr(), sizes, at::TensorOptions(input.scalar_type()));
822801
#else
823-
std::array<exec_aten::DimOrderType, 2> input_dim_order{0, 1};
824-
std::array<exec_aten::SizesType, 2> input_sizes;
802+
std::array<::executorch::aten::DimOrderType, 2> input_dim_order{0, 1};
803+
std::array<::executorch::aten::SizesType, 2> input_sizes;
825804
input_sizes[0] = num_tokens;
826805
input_sizes[1] = input.size(input.dim() - 1);
827-
std::array<exec_aten::StridesType, 2> input_strides;
806+
std::array<::executorch::aten::StridesType, 2> input_strides;
828807
executorch::runtime::dim_order_to_stride_nocheck(
829808
input_sizes.data(), input_dim_order.data(), 2, input_strides.data());
830809
void* input_data = input.mutable_data_ptr();
@@ -859,8 +838,4 @@ Tensor& quantize_per_token_out(
859838
} // namespace native
860839
} // namespace G3
861840
} // namespace impl
862-
<<<<<<< HEAD
863-
} // namespace cadence
864-
=======
865841
} // namespace cadence
866-
>>>>>>> 7bd011ff5 (Updated name space of the operators by appending cadence)

0 commit comments

Comments (0)