|
10 | 10 | #include <cinttypes> |
11 | 11 | #include <cmath> |
12 | 12 |
|
| 13 | +#include <executorch/backends/cadence/fusion_g3/operators/operators.h> |
| 14 | + |
13 | 15 | #include <xa_nnlib_kernels_api.h> |
14 | 16 |
|
15 | | -<<<<<<< HEAD |
16 | | -<<<<<<< HEAD |
17 | | -======= |
18 | 17 | #include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h> |
19 | | ->>>>>>> 897987e42 (copied XT_KERNEL_CHECK to a header file and included the header file in the operators at backends\cadence\fusion_f3\operators folder) |
20 | 18 | #include <executorch/kernels/portable/cpu/util/reduce_util.h> |
21 | 19 | #include <executorch/runtime/kernel/kernel_includes.h> |
22 | 20 |
|
23 | | -<<<<<<< HEAD |
24 | | -using ::executorch::aten::ArrayRef; |
25 | | -using ::executorch::aten::optional; |
| 21 | +using ::executorch::aten::Scalar; |
26 | 22 | using ::executorch::aten::ScalarType; |
27 | 23 | using ::executorch::aten::Tensor; |
28 | 24 | using ::executorch::runtime::Error; |
29 | 25 | using ::executorch::runtime::KernelRuntimeContext; |
30 | | -======= |
31 | | -======= |
32 | | ->>>>>>> d411838ef (added operators for sub, slice, permute, exp, mean, div. Disabled argument checks in the operators using macro) |
33 | | -using exec_aten::Scalar; |
34 | | -using exec_aten::ScalarType; |
35 | | -using exec_aten::Tensor; |
36 | | -using torch::executor::Error; |
37 | | -using torch::executor::KernelRuntimeContext; |
38 | | -<<<<<<< HEAD |
39 | | ->>>>>>> 92b58ef81 (Resolved Linter errors) |
40 | | -======= |
41 | | ->>>>>>> d411838ef (added operators for sub, slice, permute, exp, mean, div. Disabled argument checks in the operators using macro) |
42 | 26 |
|
43 | 27 | /* ScalarType in ExecuTorch does not yet support the data types below. |
44 | 28 |  * So, we create placeholders for them here. Once ScalarType is updated |
45 | 29 |  * to support these data types, the placeholders can be removed and the |
46 | 30 |  * operator updated accordingly. |
47 | 31 |  */ |
48 | | -<<<<<<< HEAD |
49 | 32 | enum datatype { Ushort = 20, Bits4u = 21, Bits4 = 22 }; |
50 | | -======= |
51 | | - enum datatype { |
52 | | - Ushort = 20, |
53 | | - Bits4u = 21, |
54 | | - Bits4 = 22 |
55 | | - }; |
56 | | ->>>>>>> 92b58ef81 (Resolved Linter errors) |
57 | 33 |
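Note (editorial, not part of the diff): a minimal sketch of how such placeholder values are typically consumed. Because the ids live outside the current ScalarType range, a kernel would compare them as plain integers rather than as ScalarType enumerators. The helper name below is a hypothetical illustration, not code from this file.

    // Hypothetical usage sketch: placeholder dtype ids occupy values beyond the
    // current ScalarType range, so they are checked as raw integers.
    enum datatype { Ushort = 20, Bits4u = 21, Bits4 = 22 };

    inline bool is_placeholder_dtype(int dtype_value) {
      return dtype_value == Ushort || dtype_value == Bits4u ||
             dtype_value == Bits4;
    }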
|
58 | 34 | /** |
59 | 35 | * For an input tensor, use the scale and zero_point arguments to quantize it. |
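Note (editorial, not part of the diff): the doc comment above describes standard affine quantization. As a point of reference, a minimal standalone sketch of the per-element math is shown below; the names are illustrative and the operator in this diff implements the same math via NNLib kernels and ScalarType dispatch.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: q = clamp(round(x / scale) + zero_point, quant_min, quant_max).
    inline int32_t quantize_element(
        float x,
        double scale,
        int64_t zero_point,
        int64_t quant_min,
        int64_t quant_max) {
      int64_t q = static_cast<int64_t>(std::nearbyint(x / scale)) + zero_point;
      q = std::min(std::max(q, quant_min), quant_max);
      return static_cast<int32_t>(q);
    }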
@@ -172,7 +148,8 @@ Tensor& quantize_impl( |
172 | 148 | int* axis, |
173 | 149 | int quant_min, |
174 | 150 | int quant_max) { |
175 | | - const exec_aten::ArrayRef<Tensor::SizesType> input_size = input.sizes(); |
| 151 | + const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size = |
| 152 | + input.sizes(); |
176 | 153 |
|
177 | 154 | int kTensorDimensionLimit = 5; |
178 | 155 |
|
@@ -350,8 +327,9 @@ Tensor& quantize_impl( |
350 | 327 | } |
351 | 328 | } |
352 | 329 |
|
353 | | - exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{ |
354 | | - exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}}; |
| 330 | + ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>> |
| 331 | + optional_dim_list{::executorch::aten::ArrayRef<int64_t>{ |
| 332 | + dims, size_t(input.dim() - 1)}}; |
355 | 333 |
|
356 | 334 | // Actual quantization logic |
357 | 335 | // input, out are the input and output tensors |
@@ -554,8 +532,9 @@ Tensor& quantize_impl( |
554 | 532 | } |
555 | 533 | } |
556 | 534 |
|
557 | | - exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{ |
558 | | - exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}}; |
| 535 | + ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>> |
| 536 | + optional_dim_list{::executorch::aten::ArrayRef<int64_t>{ |
| 537 | + dims, size_t(input.dim() - 1)}}; |
559 | 538 |
|
560 | 539 | // Actual quantization logic |
561 | 540 | // input, out are the input and output tensors |
@@ -820,11 +799,11 @@ Tensor& quantize_per_token_out( |
820 | 799 | Tensor reshaped_input = at::from_blob( |
821 | 800 | input.mutable_data_ptr(), sizes, at::TensorOptions(input.scalar_type())); |
822 | 801 | #else |
823 | | - std::array<exec_aten::DimOrderType, 2> input_dim_order{0, 1}; |
824 | | - std::array<exec_aten::SizesType, 2> input_sizes; |
| 802 | + std::array<::executorch::aten::DimOrderType, 2> input_dim_order{0, 1}; |
| 803 | + std::array<::executorch::aten::SizesType, 2> input_sizes; |
825 | 804 | input_sizes[0] = num_tokens; |
826 | 805 | input_sizes[1] = input.size(input.dim() - 1); |
827 | | - std::array<exec_aten::StridesType, 2> input_strides; |
| 806 | + std::array<::executorch::aten::StridesType, 2> input_strides; |
828 | 807 | executorch::runtime::dim_order_to_stride_nocheck( |
829 | 808 | input_sizes.data(), input_dim_order.data(), 2, input_strides.data()); |
830 | 809 | void* input_data = input.mutable_data_ptr(); |
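Note (editorial, not part of the diff): for the contiguous 2-D view built above ({num_tokens, width} with dim order {0, 1}), the stride computation delegated to dim_order_to_stride_nocheck reduces to the sketch below. This is a simplified illustration of the result, not the runtime's actual implementation.

    #include <array>
    #include <cstdint>

    // For a contiguous dim order {0, 1}: the innermost dimension has stride 1
    // and the outer dimension's stride is the inner size, i.e. {width, 1}.
    inline void contiguous_strides_2d(
        const std::array<int64_t, 2>& sizes, std::array<int64_t, 2>& strides) {
      strides[1] = 1;
      strides[0] = sizes[1];
    }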
@@ -859,8 +838,4 @@ Tensor& quantize_per_token_out( |
859 | 838 | } // namespace native |
860 | 839 | } // namespace G3 |
861 | 840 | } // namespace impl |
862 | | -<<<<<<< HEAD |
863 | | -} // namespace cadence |
864 | | -======= |
865 | 841 | } // namespace cadence |
866 | | ->>>>>>> 7bd011ff5 (Updated name space of the operators by appending cadence) |