Skip to content

Commit e79b466

Browse files
Namespace usage is changed in the operators
1 parent 444958c commit e79b466

File tree

15 files changed

+306
-156
lines changed

15 files changed

+306
-156
lines changed

backends/cadence/fusion_g3/operators/op_add.cpp

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,13 @@
1616
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
1717
#include <executorch/runtime/kernel/kernel_includes.h>
1818
#include <executorch/runtime/platform/assert.h>
19-
#include <xa_nnlib_kernels_api.h>
2019

21-
using exec_aten::Scalar;
22-
using exec_aten::ScalarType;
23-
using exec_aten::Tensor;
24-
using executorch::runtime::canCast;
25-
using torch::executor::Error;
26-
using torch::executor::KernelRuntimeContext;
20+
using ::executorch::aten::Scalar;
21+
using ::executorch::aten::ScalarType;
22+
using ::executorch::aten::Tensor;
23+
using ::executorch::runtime::canCast;
24+
using ::executorch::runtime::Error;
25+
using ::executorch::runtime::KernelRuntimeContext;
2726

2827
namespace cadence {
2928
namespace impl {
@@ -347,6 +346,7 @@ Tensor& add_scalar_out(
347346
SAME_AS_COMMON);
348347
});
349348
}
349+
350350
return out;
351351
}
352352

backends/cadence/fusion_g3/operators/op_cat.cpp

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,17 +6,18 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1011
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
1112
#include <executorch/runtime/kernel/kernel_includes.h>
1213
#include <xa_nnlib_kernels_api.h>
1314
#include <cstring>
1415

15-
using exec_aten::Scalar;
16-
using exec_aten::ScalarType;
17-
using exec_aten::Tensor;
18-
using torch::executor::Error;
19-
using torch::executor::KernelRuntimeContext;
16+
using ::executorch::aten::Scalar;
17+
using ::executorch::aten::ScalarType;
18+
using ::executorch::aten::Tensor;
19+
using ::executorch::runtime::Error;
20+
using ::executorch::runtime::KernelRuntimeContext;
2021

2122
/* ScalarType in ExecuTorch does not have support for the below data types.
2223
* So, creating a placeholder for these data types. Once ScalarType is
@@ -32,7 +33,7 @@ namespace native {
3233

3334
Tensor& cat_out(
3435
KernelRuntimeContext& ctx,
35-
exec_aten::ArrayRef<Tensor> tensors,
36+
::executorch::aten::ArrayRef<Tensor> tensors,
3637
int64_t dim,
3738
Tensor& out) {
3839
if (dim < 0) {
@@ -80,7 +81,7 @@ Tensor& cat_out(
8081
int inp_shapes_size[tensors.size()];
8182

8283
int temp_sizes[tensors.size()][kTensorDimensionLimit];
83-
exec_aten::ArrayRef<Tensor::SizesType> temp_size;
84+
::executorch::aten::ArrayRef<Tensor::SizesType> temp_size;
8485

8586
for (int i = 0; i < tensors.size(); i++) {
8687
inp_tensors[i] = tensors[i].const_data_ptr<signed char>();
@@ -95,7 +96,7 @@ Tensor& cat_out(
9596

9697
signed char* out_data = out.mutable_data_ptr<signed char>();
9798

98-
const exec_aten::ArrayRef<Tensor::SizesType> out_size = out.sizes();
99+
const ::executorch::aten::ArrayRef<Tensor::SizesType> out_size = out.sizes();
99100
int out_shapes[kTensorDimensionLimit];
100101
for (int i = 0; i < out_size.size(); i++) // output shapes
101102
{

backends/cadence/fusion_g3/operators/op_dequantize.cpp

Lines changed: 30 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -6,22 +6,25 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
13+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1014
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
1115
#include <executorch/runtime/kernel/kernel_includes.h>
12-
#include <xa_nnlib_kernels_api.h>
1316
#include <algorithm>
1417
#include <cinttypes>
1518
#include <cmath>
1619

17-
using exec_aten::Scalar;
18-
using exec_aten::ScalarType;
19-
using exec_aten::Tensor;
20-
using torch::executor::Error;
21-
using torch::executor::KernelRuntimeContext;
20+
using ::executorch::aten::Scalar;
21+
using ::executorch::aten::ScalarType;
22+
using ::executorch::aten::Tensor;
23+
using ::executorch::runtime::Error;
24+
using ::executorch::runtime::KernelRuntimeContext;
2225

2326
template <typename T>
24-
using optional = exec_aten::optional<T>;
27+
using optional = ::executorch::aten::optional<T>;
2528
/* ScalarType in ExecuTorch does not have support for the below data types.
2629
* So, creating a placeholder for these data types. Once ScalarType is
2730
* updated to support the below data types, these can be removed and
@@ -48,7 +51,7 @@ void check_dequantize_per_tensor_args(
4851
int64_t quant_min,
4952
int64_t quant_max,
5053
ScalarType dtype,
51-
exec_aten::optional<ScalarType>& out_dtype,
54+
::executorch::aten::optional<ScalarType>& out_dtype,
5255
Tensor& out) {
5356
ET_CHECK_MSG(
5457
input.scalar_type() == ScalarType::Byte ||
@@ -91,8 +94,9 @@ Tensor& dequantize_impl(
9194
float* scale_data,
9295
int* zero_point_data,
9396
int* axis,
94-
exec_aten::optional<ScalarType> out_dtype) {
95-
const exec_aten::ArrayRef<Tensor::SizesType> input_size = input.sizes();
97+
::executorch::aten::optional<ScalarType> out_dtype) {
98+
const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size =
99+
input.sizes();
96100

97101
int kTensorDimensionLimit = 5;
98102

@@ -251,8 +255,9 @@ Tensor& dequantize_impl(
251255
}
252256
}
253257

254-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
255-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
258+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
259+
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
260+
dims, size_t(input.dim() - 1)}};
256261

257262
// Actual dequantization logic
258263
// input, out are the input and output tensors
@@ -456,8 +461,9 @@ Tensor& dequantize_impl(
456461
}
457462
}
458463

459-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> optional_dim_list{
460-
exec_aten::ArrayRef<int64_t>{dims, size_t(input.dim() - 1)}};
464+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
465+
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
466+
dims, size_t(input.dim() - 1)}};
461467

462468
// Actual dequantization logic
463469
// input, out are the input and output tensors
@@ -559,7 +565,7 @@ Tensor& dequantize_per_tensor_out(
559565
int64_t quant_min,
560566
int64_t quant_max,
561567
ScalarType dtype,
562-
exec_aten::optional<ScalarType> out_dtype,
568+
::executorch::aten::optional<ScalarType> out_dtype,
563569
Tensor& out) {
564570
#ifdef OP_ARG_CHECK
565571
torch::executor::Error err = resize_tensor(out, input.sizes());
@@ -588,7 +594,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
588594
int64_t quant_min,
589595
int64_t quant_max,
590596
ScalarType dtype,
591-
exec_aten::optional<ScalarType> out_dtype,
597+
::executorch::aten::optional<ScalarType> out_dtype,
592598
Tensor& out) {
593599
#ifdef OP_ARG_CHECK
594600
ET_CHECK_MSG(
@@ -627,12 +633,12 @@ Tensor& dequantize_per_channel_out(
627633
KernelRuntimeContext& context,
628634
const Tensor& input,
629635
const Tensor& scale,
630-
const exec_aten::optional<Tensor>& opt_zero_points,
636+
const ::executorch::aten::optional<Tensor>& opt_zero_points,
631637
int64_t axis,
632638
int64_t quant_min,
633639
int64_t quant_max,
634640
ScalarType dtype,
635-
exec_aten::optional<ScalarType> out_dtype,
641+
::executorch::aten::optional<ScalarType> out_dtype,
636642
Tensor& out) {
637643
if (axis < 0) {
638644
axis += executorch::runtime::nonzero_dim(input);
@@ -725,18 +731,18 @@ Tensor& dequantize_per_token_out(
725731
}
726732
// This unfortunate change is needed because we compile op_quantize for aten
727733
// mode as well
728-
std::array<exec_aten::SizesType, 2> input_sizes;
729-
input_sizes[0] = static_cast<exec_aten::SizesType>(num_channels);
734+
std::array<::executorch::aten::SizesType, 2> input_sizes;
735+
input_sizes[0] = static_cast<::executorch::aten::SizesType>(num_channels);
730736
input_sizes[1] =
731-
static_cast<exec_aten::SizesType>(input.size(input.dim() - 1));
737+
static_cast<::executorch::aten::SizesType>(input.size(input.dim() - 1));
732738
#ifdef USE_ATEN_LIB
733739
Tensor reshaped_input = at::from_blob(
734740
input.mutable_data_ptr(),
735741
input_sizes,
736742
at::TensorOptions(input.scalar_type()));
737743
#else
738-
std::array<exec_aten::DimOrderType, 2> input_dim_order{0, 1};
739-
std::array<exec_aten::StridesType, 2> input_strides;
744+
std::array<::executorch::aten::DimOrderType, 2> input_dim_order{0, 1};
745+
std::array<::executorch::aten::StridesType, 2> input_strides;
740746
executorch::runtime::dim_order_to_stride_nocheck(
741747
input_sizes.data(), input_dim_order.data(), 2, input_strides.data());
742748
void* input_data = input.mutable_data_ptr();
@@ -769,22 +775,6 @@ Tensor& dequantize_per_token_out(
769775
out);
770776
}
771777

772-
Tensor& dequantize_per_token_out(
773-
KernelRuntimeContext& context,
774-
const Tensor& input,
775-
const Tensor& scale,
776-
const Tensor& zero_points,
777-
int64_t quant_min,
778-
int64_t quant_max,
779-
ScalarType dtype,
780-
ScalarType out_dtype,
781-
Tensor& out)
782-
{
783-
(void)context;
784-
return dequantize_per_token_out(
785-
input, scale, zero_points, quant_min, quant_max, dtype, out_dtype, out);
786-
}
787-
788778
} // namespace native
789779
} // namespace G3
790780
} // namespace impl

backends/cadence/fusion_g3/operators/op_div.cpp

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,21 +6,24 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
13+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1014
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1115
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
1216
#include <executorch/kernels/portable/cpu/util/math_util.h>
1317
#include <executorch/runtime/kernel/kernel_includes.h>
1418
#include <executorch/runtime/platform/assert.h>
15-
#include <xa_nnlib_kernels_api.h>
1619
#include <cmath>
1720

18-
using exec_aten::Scalar;
19-
using exec_aten::ScalarType;
20-
using exec_aten::Tensor;
21-
using executorch::runtime::canCast;
22-
using torch::executor::Error;
23-
using torch::executor::KernelRuntimeContext;
21+
using ::executorch::aten::Scalar;
22+
using ::executorch::aten::ScalarType;
23+
using ::executorch::aten::Tensor;
24+
using ::executorch::runtime::canCast;
25+
using ::executorch::runtime::Error;
26+
using ::executorch::runtime::KernelRuntimeContext;
2427

2528
namespace cadence {
2629
namespace impl {
@@ -226,7 +229,7 @@ Tensor& div_out_mode(
226229
KernelRuntimeContext& ctx,
227230
const Tensor& a,
228231
const Tensor& b,
229-
exec_aten::optional<exec_aten::string_view> mode,
232+
::executorch::aten::optional<::executorch::aten::string_view> mode,
230233
Tensor& out) {
231234
if (!mode.has_value()) {
232235
return div_out(ctx, a, b, out);
@@ -542,7 +545,7 @@ Tensor& div_scalar_mode_out(
542545
KernelRuntimeContext& ctx,
543546
const Tensor& a,
544547
const Scalar& b,
545-
exec_aten::optional<exec_aten::string_view> mode,
548+
::executorch::aten::optional<::executorch::aten::string_view> mode,
546549
Tensor& out) {
547550
if (!mode.has_value()) {
548551
return div_scalar_out(ctx, a, b, out);

backends/cadence/fusion_g3/operators/op_exp.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,17 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1011
#include <executorch/kernels/portable/cpu/pattern/pattern.h>
1112
#include <executorch/runtime/kernel/kernel_includes.h>
1213
#include <xa_nnlib_kernels_api.h>
1314
#include <cmath>
1415

15-
using exec_aten::Scalar;
16-
using exec_aten::ScalarType;
17-
using exec_aten::Tensor;
18-
using torch::executor::Error;
16+
using ::executorch::aten::Scalar;
17+
using ::executorch::aten::ScalarType;
18+
using ::executorch::aten::Tensor;
19+
using ::executorch::runtime::Error;
1920
using torch::executor::RuntimeContext;
2021

2122
namespace cadence {

backends/cadence/fusion_g3/operators/op_mean.cpp

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,18 +6,21 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
13+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1014
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
1115
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
1216
#include <executorch/runtime/kernel/kernel_includes.h>
1317
#include <executorch/runtime/platform/assert.h>
14-
#include <xa_nnlib_kernels_api.h>
1518

16-
using exec_aten::Scalar;
17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
19-
using torch::executor::Error;
20-
using torch::executor::KernelRuntimeContext;
19+
using ::executorch::aten::Scalar;
20+
using ::executorch::aten::ScalarType;
21+
using ::executorch::aten::Tensor;
22+
using ::executorch::runtime::Error;
23+
using ::executorch::runtime::KernelRuntimeContext;
2124

2225
namespace cadence {
2326
namespace impl {
@@ -27,7 +30,8 @@ namespace native {
2730
int prepare_data(
2831
const Tensor& in,
2932
Tensor& out,
30-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> dim_list,
33+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
34+
dim_list,
3135
int* inp_shape,
3236
int* out_shape,
3337
int* p_axis,
@@ -58,9 +62,10 @@ int prepare_data(
5862
Tensor& mean_dim_out(
5963
KernelRuntimeContext& ctx,
6064
const Tensor& in,
61-
exec_aten::optional<exec_aten::ArrayRef<int64_t>> dim_list,
65+
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
66+
dim_list,
6267
bool keepdim,
63-
exec_aten::optional<ScalarType> dtype,
68+
::executorch::aten::optional<ScalarType> dtype,
6469
Tensor& out) {
6570
(void)ctx;
6671

backends/cadence/fusion_g3/operators/op_mul.cpp

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,19 +6,22 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9-
#include <executorch/backends/cadence/fusion_g3/operators/tensor_util.h>
9+
#include <executorch/backends/cadence/fusion_g3/operators/operators.h>
10+
11+
#include <xa_nnlib_kernels_api.h>
12+
13+
#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
1014
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1115
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
1216
#include <executorch/runtime/kernel/kernel_includes.h>
1317
#include <executorch/runtime/platform/assert.h>
14-
#include <xa_nnlib_kernels_api.h>
1518

16-
using exec_aten::Scalar;
17-
using exec_aten::ScalarType;
18-
using exec_aten::Tensor;
19-
using executorch::runtime::canCast;
20-
using torch::executor::Error;
21-
using torch::executor::KernelRuntimeContext;
19+
using ::executorch::aten::Scalar;
20+
using ::executorch::aten::ScalarType;
21+
using ::executorch::aten::Tensor;
22+
using ::executorch::runtime::canCast;
23+
using ::executorch::runtime::Error;
24+
using ::executorch::runtime::KernelRuntimeContext;
2225

2326
namespace cadence {
2427
namespace impl {

0 commit comments

Comments
 (0)