Skip to content

Commit 7bd011f

Browse files
Updated namespace of the operators by prepending cadence
Signed-off-by: [email protected] <[email protected]>
1 parent 0bf646e commit 7bd011f

File tree

8 files changed

+64
-46
lines changed

8 files changed

+64
-46
lines changed

backends/cadence/aot/functions_fusion_g3.yaml

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -20,17 +20,17 @@
2020
- op: _softmax.out
2121
kernels:
2222
- arg_meta: null
23-
kernel_name: impl::FusionG3::softmax_out
23+
kernel_name: cadence::impl::G3::softmax_out
2424

2525
- op: add.out
2626
kernels:
2727
- arg_meta: null
28-
kernel_name: impl::FusionG3::add_out
28+
kernel_name: cadence::impl::G3::add_out
2929

3030
- op: add.Scalar_out
3131
kernels:
3232
- arg_meta: null
33-
kernel_name: impl::FusionG3::add_scalar_out
33+
kernel_name: cadence::impl::G3::add_scalar_out
3434

3535
- op: bmm.out
3636
kernels:
@@ -40,7 +40,7 @@
4040
- op: cat.out
4141
kernels:
4242
- arg_meta: null
43-
kernel_name: impl::FusionG3::cat_out
43+
kernel_name: cadence::impl::G3::cat_out
4444

4545
- op: clone.out
4646
kernels:
@@ -70,12 +70,12 @@
7070
- op: mul.out
7171
kernels:
7272
- arg_meta: null
73-
kernel_name: impl::FusionG3::mul_out
73+
kernel_name: cadence::impl::G3::mul_out
7474

7575
- op: mul.Scalar_out
7676
kernels:
7777
- arg_meta: null
78-
kernel_name: impl::FusionG3::mul_scalar_out
78+
kernel_name: cadence::impl::G3::mul_scalar_out
7979

8080
- op: permute_copy.out
8181
kernels:
@@ -115,5 +115,4 @@
115115
- op: native_layer_norm.out
116116
kernels:
117117
- arg_meta: null
118-
kernel_name: impl::FusionG3::native_layer_norm_out
119-
118+
kernel_name: cadence::impl::G3::native_layer_norm_out

backends/cadence/fusion_g3/operators/op_add.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@ using torch::executor::KernelRuntimeContext;
2020
using executorch::runtime::canCast;
2121
using torch::executor::Error;
2222

23+
namespace cadence {
2324
namespace impl {
24-
namespace FusionG3 {
25+
namespace G3 {
2526
namespace native {
2627

2728

@@ -242,6 +243,6 @@ Tensor& add_scalar_out(KernelRuntimeContext& ctx,
242243
}
243244

244245
} // namespace native
245-
} // namespace FusionG3
246+
} // namespace G3
246247
} // namespace impl
247-
248+
} //namespace cadence

backends/cadence/fusion_g3/operators/op_cat.cpp

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,20 @@ using exec_aten::Tensor;
1818
using torch::executor::KernelRuntimeContext;
1919
using torch::executor::Error;
2020

21+
/* ScalarType in Executorch do not have support for below data types.
22+
* So, creating a placeholder for these data types. Once, ScalarTypes is
23+
* updated to have support for below data types, these can be removed and
24+
* operator need to be updated accordingly
25+
*/
26+
enum datatype {
27+
Ushort = 20,
28+
Uint = 23,
29+
};
30+
2131

32+
namespace cadence {
2233
namespace impl {
23-
namespace FusionG3 {
34+
namespace G3 {
2435
namespace native {
2536

2637

@@ -95,6 +106,22 @@ Tensor& cat_out(KernelRuntimeContext& ctx,
95106
inp_shapes_size[0], tensors.size(), (int)dim, sizeof(char));
96107

97108
}
109+
if(out.scalar_type() == (ScalarType)Uint)
110+
{
111+
xa_nn_cat(out_data, out_shapes, inp_tensors, inp_tensors_shapes,
112+
inp_shapes_size[0], tensors.size(), (int)dim, sizeof(int));
113+
}
114+
else if(out.scalar_type() == (ScalarType)Ushort)
115+
{
116+
xa_nn_cat(out_data, out_shapes, inp_tensors, inp_tensors_shapes,
117+
inp_shapes_size[0], tensors.size(), (int)dim, sizeof(short));
118+
}
119+
else if(out.scalar_type() == ScalarType::Byte)
120+
{
121+
xa_nn_cat(out_data, out_shapes, inp_tensors, inp_tensors_shapes,
122+
inp_shapes_size[0], tensors.size(), (int)dim, sizeof(char));
123+
124+
}
98125
else
99126
{
100127
// Special handling when all inputs are 1D-empty tensors for aten consistency
@@ -145,5 +172,6 @@ Tensor& cat_out(KernelRuntimeContext& ctx,
145172
}
146173

147174
} // namespace native
148-
} // namespace FusionG3
149-
} // namespace impl
175+
} // namespace G3
176+
} // namespace impl
177+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_dequantize.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,9 @@ using optional = exec_aten::optional<T>;
3636
/**
3737
* For an input tensor, use the scale and zero_point arguments to quantize it.
3838
*/
39+
namespace cadence {
3940
namespace impl {
40-
namespace FusionG3 {
41+
namespace G3 {
4142
namespace native {
4243

4344
namespace {
@@ -803,5 +804,6 @@ Tensor& dequantize_per_token_out(
803804
}
804805

805806
} // namespace native
806-
} // namespace FusionG3
807+
} // namespace G3
807808
} // namespace impl
809+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_mul.cpp

Lines changed: 4 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,9 @@ using torch::executor::KernelRuntimeContext;
1919
using executorch::runtime::canCast;
2020
using torch::executor::Error;
2121

22+
namespace cadence {
2223
namespace impl {
23-
namespace FusionG3 {
24+
namespace G3 {
2425
namespace native {
2526

2627
Tensor& mul_out(KernelRuntimeContext& ctx,
@@ -203,24 +204,6 @@ Tensor& mul_scalar_out(KernelRuntimeContext& ctx,
203204
}
204205

205206
} // namespace impl
206-
} // namespace FusionG3
207+
} // namespace G3
207208
} // namespace native
208-
209-
210-
211-
212-
213-
214-
215-
216-
217-
218-
219-
220-
221-
222-
223-
224-
225-
226-
209+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@ using ScalarType = exec_aten::ScalarType;
2020
using IntArrayRef = exec_aten::ArrayRef<int64_t>;
2121
using torch::executor::Error;
2222

23+
namespace cadence {
2324
namespace impl {
24-
namespace FusionG3 {
25+
namespace G3 {
2526
namespace native {
2627

2728
namespace {
@@ -272,5 +273,6 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
272273
}
273274

274275
} // namespace native
275-
} // namespace FusionG3
276+
} // namespace G3
276277
} // namespace impl
278+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_quantize.cpp

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ using torch::executor::Error;
3434
/**
3535
* For an input tensor, use the scale and zero_point arguments to quantize it.
3636
*/
37-
37+
namespace cadence {
3838
namespace impl {
3939
namespace FusionG3 {
4040
namespace native {
@@ -792,6 +792,7 @@ Tensor& quantize_per_token_out(
792792
input, scale, zero_point, quant_min, quant_max, dtype, out);
793793
}
794794

795-
}; // namespace native
796-
}; // namespace FusionG3
797-
}; // namespace impl
795+
} // namespace native
796+
} // namespace G3
797+
} // namespace impl
798+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_softmax.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@ using exec_aten::Tensor;
2020
using torch::executor::KernelRuntimeContext;
2121
using torch::executor::Error;
2222

23+
namespace cadence {
2324
namespace impl {
24-
namespace FusionG3 {
25+
namespace G3 {
2526
namespace native {
2627

2728
Tensor& softmax_out(
@@ -113,5 +114,6 @@ Tensor& softmax_out(
113114
}
114115

115116
} // namespace native
116-
} // namespace FusionG3
117-
} // namespace impl
117+
} // namespace G3
118+
} // namespace impl
119+
} // namespace cadence

0 commit comments

Comments
 (0)