Skip to content

Commit 203dc18

Browse files
hsharma35 authored and facebook-github-bot committed
FusionG3 operators.
Summary: Cleanup header order and `using` declarations for operators to match style guide. Reviewed By: zonglinpeng Differential Revision: D67128499
1 parent 61b9e1b commit 203dc18

File tree

9 files changed

+83
-65
lines changed

9 files changed

+83
-65
lines changed

backends/cadence/fusion_g3/operators/op_add.cpp

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,12 +6,15 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
913
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1014
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
1115
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
1216
#include <executorch/runtime/kernel/kernel_includes.h>
1317
#include <executorch/runtime/platform/assert.h>
14-
#include <xa_nnlib_kernels_api.h>
1518

1619
using exec_aten::Scalar;
1720
using exec_aten::ScalarType;

backends/cadence/fusion_g3/operators/op_cat.cpp

Lines changed: 9 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -6,16 +6,17 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <cstring>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
913
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
1014
#include <executorch/runtime/kernel/kernel_includes.h>
11-
#include <xa_nnlib_kernels_api.h>
12-
#include <cstring>
1315

14-
using exec_aten::Scalar;
15-
using exec_aten::ScalarType;
16-
using exec_aten::Tensor;
17-
using torch::executor::Error;
18-
using torch::executor::KernelRuntimeContext;
16+
using ::executorch::aten::ScalarType;
17+
using ::executorch::aten::Tensor;
18+
using ::executorch::runtime::Error;
19+
using ::executorch::runtime::KernelRuntimeContext;
1920

2021
/* ScalarType in Executorch do not have support for below data types.
2122
* So, creating a placeholder for these data types. Once, ScalarTypes is
@@ -194,4 +195,4 @@ Tensor& cat_out(
194195
} // namespace native
195196
} // namespace G3
196197
} // namespace impl
197-
} // namespace cadence
198+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_dequantize.cpp

Lines changed: 13 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -6,18 +6,20 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
10-
#include <executorch/runtime/kernel/kernel_includes.h>
11-
#include <xa_nnlib_kernels_api.h>
129
#include <algorithm>
1310
#include <cinttypes>
1411
#include <cmath>
1512

16-
using exec_aten::Scalar;
17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
13+
#include <xa_nnlib_kernels_api.h>
14+
15+
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
16+
#include <executorch/runtime/kernel/kernel_includes.h>
17+
18+
using ::executorch::aten::Scalar;
19+
using ::executorch::aten::ScalarType;
20+
using ::executorch::aten::Tensor;
21+
using ::executorch::runtime::Error;
22+
using ::executorch::runtime::KernelRuntimeContext;
2123

2224
template <typename T>
2325
using optional = exec_aten::optional<T>;
@@ -185,7 +187,7 @@ void dequantize_impl(
185187
if (axis == NULL) {
186188
// calculate the dequantized output, cast scale to float to match fbgemm
187189
// behavior
188-
#define ASYM_DEQUANTIZE_IMPL_TESNOR(IN_CTYPE, OUT_CTYPE, out_dtype) \
190+
#define ASYM_DEQUANTIZE_IMPL_TENSOR(IN_CTYPE, OUT_CTYPE, out_dtype) \
189191
case ScalarType::out_dtype: { \
190192
/* Hoist these function calls out of our inner loop because they might not \
191193
* get inlined without LTO, particularly in ATen mode. */ \
@@ -201,7 +203,7 @@ void dequantize_impl(
201203
#define ASYM_CALCULATE_INT_TYPE_TENSOR(IN_CTYPE, in_dtype) \
202204
case ScalarType::in_dtype: \
203205
switch (out.scalar_type()) { \
204-
ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, ASYM_DEQUANTIZE_IMPL_TESNOR); \
206+
ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, ASYM_DEQUANTIZE_IMPL_TENSOR); \
205207
default: \
206208
ET_CHECK_MSG( \
207209
false, \
@@ -219,7 +221,7 @@ void dequantize_impl(
219221
static_cast<int8_t>(input.scalar_type()));
220222
}
221223
#undef ASYM_CALCULATE_INT_TYPE_TENSOR
222-
#undef ASYM_DEQUANTIZE_IMPL_TESNOR
224+
#undef ASYM_DEQUANTIZE_IMPL_TENSOR
223225
} else {
224226
// a list contains all dimensions except axis
225227
int64_t dims[input.dim() - 1];

backends/cadence/fusion_g3/operators/op_mul.cpp

Lines changed: 9 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -6,18 +6,19 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <xa_nnlib_kernels_api.h>
10+
911
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1012
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
1113
#include <executorch/runtime/kernel/kernel_includes.h>
1214
#include <executorch/runtime/platform/assert.h>
13-
#include <xa_nnlib_kernels_api.h>
1415

15-
using exec_aten::Scalar;
16-
using exec_aten::ScalarType;
17-
using exec_aten::Tensor;
18-
using executorch::runtime::canCast;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
16+
using ::executorch::aten::Scalar;
17+
using ::executorch::aten::ScalarType;
18+
using ::executorch::aten::Tensor;
19+
using ::executorch::runtime::canCast;
20+
using ::executorch::runtime::Error;
21+
using ::executorch::runtime::KernelRuntimeContext;
2122

2223
namespace cadence {
2324
namespace impl {
@@ -238,4 +239,4 @@ Tensor& mul_scalar_out(
238239
} // namespace native
239240
} // namespace G3
240241
} // namespace impl
241-
} // namespace cadence
242+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp

Lines changed: 11 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -6,18 +6,20 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <cmath>
10+
#include <tuple>
11+
12+
#include <xa_nnlib_kernels_api.h>
13+
914
#include <executorch/kernels/portable/cpu/util/normalization_ops_util.h>
1015
#include <executorch/kernels/portable/cpu/vec_ops.h>
1116
#include <executorch/runtime/kernel/kernel_includes.h>
12-
#include <xa_nnlib_kernels_api.h>
13-
#include <cmath>
14-
#include <tuple>
1517

16-
using Tensor = exec_aten::Tensor;
17-
using ScalarType = exec_aten::ScalarType;
18-
using IntArrayRef = exec_aten::ArrayRef<int64_t>;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
18+
using ::executorch::aten::IntArrayRef;
19+
using ::executorch::aten::ScalarType;
20+
using ::executorch::aten::Tensor;
21+
using ::executorch::runtime::Error;
22+
using ::executorch::runtime::KernelRuntimeContext;
2123

2224
namespace cadence {
2325
namespace impl {
@@ -255,4 +257,4 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
255257
} // namespace native
256258
} // namespace G3
257259
} // namespace impl
258-
} // namespace cadence
260+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_quantize.cpp

Lines changed: 23 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -6,18 +6,21 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
10-
#include <executorch/runtime/kernel/kernel_includes.h>
11-
#include <xa_nnlib_kernels_api.h>
129
#include <algorithm>
1310
#include <cinttypes>
1411
#include <cmath>
1512

16-
using exec_aten::Scalar;
17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
13+
#include <xa_nnlib_kernels_api.h>
14+
15+
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
16+
#include <executorch/runtime/kernel/kernel_includes.h>
17+
18+
using ::executorch::aten::ArrayRef;
19+
using ::executorch::aten::optional;
20+
using ::executorch::aten::ScalarType;
21+
using ::executorch::aten::Tensor;
22+
using ::executorch::runtime::Error;
23+
using ::executorch::runtime::KernelRuntimeContext;
2124

2225
/* ScalarType in Executorch do not have support for below data types.
2326
* So, creating a placeholder for these data types. Once, ScalarTypes is
@@ -142,7 +145,7 @@ void quantize_impl(
142145
int* axis,
143146
int quant_min,
144147
int quant_max) {
145-
const exec_aten::ArrayRef<Tensor::SizesType> input_size = input.sizes();
148+
const ArrayRef<Tensor::SizesType> input_size = input.sizes();
146149

147150
int kTensorDimensionLimit = 5;
148151

@@ -301,8 +304,8 @@ void quantize_impl(
301304
}
302305
}
303306

304-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
305-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
307+
optional<ArrayRef<int64_t>> optional_dim_list{
308+
ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
306309

307310
// Actual quantization logic
308311
// input, out are the input and output tensors
@@ -487,8 +490,8 @@ void quantize_impl(
487490
}
488491
}
489492

490-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
491-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
493+
optional<ArrayRef<int64_t>> optional_dim_list{
494+
ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
492495

493496
// Actual quantization logic
494497
// input, out are the input and output tensors
@@ -565,9 +568,9 @@ Tensor& quantize_per_tensor_out(
565568
int64_t quant_max,
566569
ScalarType dtype,
567570
Tensor& out) {
568-
torch::executor::Error err = resize_tensor(out, input.sizes());
571+
Error err = resize_tensor(out, input.sizes());
569572
ET_CHECK_MSG(
570-
err == torch::executor::Error::Ok,
573+
err == Error::Ok,
571574
"Failed to resize out Tensor in quantize_per_tensor_out");
572575

573576
// check_quantize_per_tensor_args(input, quant_min, quant_max, dtype, out);
@@ -600,7 +603,7 @@ Tensor& quantize_per_tensor_tensor_args_out(
600603
// after ET_KERNEL_CHECK is fully implemented and properly allows non fatal
601604
// failures.
602605
if (scale.scalar_type() != ScalarType::Double) {
603-
context.fail(torch::executor::Error::InvalidArgument);
606+
context.fail(Error::InvalidArgument);
604607
return out;
605608
}
606609
ET_CHECK_MSG(
@@ -657,7 +660,7 @@ Tensor& quantize_per_channel_out(
657660
int64_t quant_max,
658661
ScalarType dtype,
659662
Tensor& out) {
660-
torch::executor::Error err = resize_tensor(out, input.sizes());
663+
Error err = resize_tensor(out, input.sizes());
661664

662665
// normalize axis
663666
ET_CHECK_MSG(
@@ -671,7 +674,7 @@ Tensor& quantize_per_channel_out(
671674
}
672675

673676
ET_CHECK_MSG(
674-
err == torch::executor::Error::Ok,
677+
err == Error::Ok,
675678
"Failed to resize out Tensor in quantize_per_channel_out");
676679

677680
ET_CHECK_MSG(
@@ -776,9 +779,9 @@ Tensor& quantize_per_token_out(
776779
input_strides.data(),
777780
executorch::runtime::TensorShapeDynamism::STATIC);
778781
Tensor reshaped_input(&reshaped_input_impl);
779-
torch::executor::Error err = resize_tensor(out, input.sizes());
782+
Error err = resize_tensor(out, input.sizes());
780783
ET_CHECK_MSG(
781-
err == torch::executor::Error::Ok,
784+
err == Error::Ok,
782785
"Failed to resize out Tensor in quantize_per_channel_out");
783786
#endif
784787

backends/cadence/fusion_g3/operators/op_softmax.cpp

Lines changed: 10 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -6,18 +6,20 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <cmath>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
913
#include <executorch/kernels/portable/cpu/util/activation_ops_util.h>
1014
#include <executorch/kernels/portable/cpu/util/functional_util.h>
1115
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
1216
#include <executorch/runtime/kernel/kernel_includes.h>
13-
#include <xa_nnlib_kernels_api.h>
14-
#include <cmath>
1517

16-
using exec_aten::Scalar;
17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
18+
using ::executorch::aten::ArrayRef;
19+
using ::executorch::aten::ScalarType;
20+
using ::executorch::aten::Tensor;
21+
using ::executorch::runtime::Error;
22+
using ::executorch::runtime::KernelRuntimeContext;
2123

2224
namespace cadence {
2325
namespace impl {
@@ -51,7 +53,7 @@ Tensor& _softmax_out(
5153
dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;
5254

5355
int inp_shapes[in.dim()];
54-
const exec_aten::ArrayRef<Tensor::SizesType> in_size = in.sizes();
56+
const ArrayRef<Tensor::SizesType> in_size = in.sizes();
5557
for (int i = 0; i < in.dim(); i++) {
5658
inp_shapes[i] = in_size[i];
5759
}

backends/cadence/fusion_g3/operators/operators.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,8 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#pragma once
10+
911
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1012
#include <executorch/runtime/kernel/kernel_runtime_context.h>
1113

backends/cadence/fusion_g3/operators/tests/test_op_add.cpp

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -8,6 +8,8 @@
88

99
#include <gtest/gtest.h>
1010
#include <stdio.h>
11+
#include <sys/times.h>
12+
#include <xtensa/sim.h>
1113

1214
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
1315
#include <executorch/runtime/core/exec_aten/exec_aten.h>

0 commit comments

Comments (0)