 
 #pragma once
 
+#include "executorch/runtime/core/exec_aten/exec_aten.h"
+#include "executorch/runtime/kernel/kernel_runtime_context.h"
+
 #define ET_FORALL_CADENCE_QUANTIZED_TYPES(_) \
   _(uint8_t, Byte)                           \
   _(int8_t, Char)
 
-using ::executorch::aten::IntArrayRef;
-using ::executorch::aten::optional;
-using ::executorch::aten::ScalarType;
-using ::executorch::aten::Tensor;
-using ::executorch::runtime::KernelRuntimeContext;
-
 namespace cadence {
 namespace impl {
 namespace HiFi {
@@ -62,68 +59,68 @@ void quantized_relu_out(
     ::executorch::aten::Tensor& output);
 
 void quantized_linear_out(
-    __ET_UNUSED KernelRuntimeContext& ctx,
-    const Tensor& in,
-    const Tensor& weight,
-    const Tensor& bias,
+    ::executorch::runtime::KernelRuntimeContext& ctx,
+    const ::executorch::aten::Tensor& in,
+    const ::executorch::aten::Tensor& weight,
+    const ::executorch::aten::Tensor& bias,
     int64_t in_zero_point,
-    const Tensor& weight_zero_point,
-    const Tensor& out_multiplier,
-    const Tensor& out_shift,
+    const ::executorch::aten::Tensor& weight_zero_point,
+    const ::executorch::aten::Tensor& out_multiplier,
+    const ::executorch::aten::Tensor& out_shift,
     int64_t out_zero_point,
-    __ET_UNUSED const optional<Tensor>& offset,
-    Tensor& out);
+    const ::executorch::aten::optional<::executorch::aten::Tensor>& offset,
+    ::executorch::aten::Tensor& out);
 
 void quantized_linear_per_tensor_out(
-    __ET_UNUSED KernelRuntimeContext& ctx,
-    const Tensor& in,
-    const Tensor& weight,
-    const Tensor& bias,
+    ::executorch::runtime::KernelRuntimeContext& ctx,
+    const ::executorch::aten::Tensor& in,
+    const ::executorch::aten::Tensor& weight,
+    const ::executorch::aten::Tensor& bias,
     int64_t in_zero_point,
     int64_t weight_zero_point,
     int64_t out_multiplier,
     int64_t out_shift,
     int64_t out_zero_point,
-    __ET_UNUSED const optional<Tensor>& offset,
-    Tensor& out);
+    const ::executorch::aten::optional<::executorch::aten::Tensor>& offset,
+    ::executorch::aten::Tensor& out);
 
 void quantized_conv_out(
-    __ET_UNUSED KernelRuntimeContext& ctx,
-    const Tensor& input,
-    const Tensor& weight,
-    const Tensor& bias,
-    IntArrayRef stride,
-    IntArrayRef padding,
-    IntArrayRef dilation,
+    ::executorch::runtime::KernelRuntimeContext& ctx,
+    const ::executorch::aten::Tensor& input,
+    const ::executorch::aten::Tensor& weight,
+    const ::executorch::aten::Tensor& bias,
+    ::executorch::aten::IntArrayRef stride,
+    ::executorch::aten::IntArrayRef padding,
+    ::executorch::aten::IntArrayRef dilation,
     int64_t groups,
     int64_t in_zero_point,
-    const Tensor& weight_zero_point,
-    const Tensor& bias_scale,
+    const ::executorch::aten::Tensor& weight_zero_point,
+    const ::executorch::aten::Tensor& bias_scale,
     double output_scale,
     int64_t output_zero_point,
-    __ET_UNUSED const Tensor& out_multiplier,
-    __ET_UNUSED const Tensor& out_shift,
+    const ::executorch::aten::Tensor& out_multiplier,
+    const ::executorch::aten::Tensor& out_shift,
     bool channel_last,
-    Tensor& out);
+    ::executorch::aten::Tensor& out);
 
 void quantized_conv_per_tensor_out(
-    __ET_UNUSED KernelRuntimeContext& ctx,
-    const Tensor& input,
-    const Tensor& weight,
-    const Tensor& bias,
-    IntArrayRef stride,
-    IntArrayRef padding,
-    IntArrayRef dilation,
+    ::executorch::runtime::KernelRuntimeContext& ctx,
+    const ::executorch::aten::Tensor& input,
+    const ::executorch::aten::Tensor& weight,
+    const ::executorch::aten::Tensor& bias,
+    ::executorch::aten::IntArrayRef stride,
+    ::executorch::aten::IntArrayRef padding,
+    ::executorch::aten::IntArrayRef dilation,
     int64_t groups,
    int64_t in_zero_point,
    int64_t weight_zero_point,
    double bias_scale,
    double output_scale,
    int64_t output_zero_point,
-    __ET_UNUSED int64_t out_multiplier,
-    __ET_UNUSED int64_t out_shift,
+    int64_t out_multiplier,
+    int64_t out_shift,
     bool channel_last,
-    Tensor& out);
+    ::executorch::aten::Tensor& out);
 
 } // namespace native
 } // namespace HiFi
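
Since the header no longer exports using-declarations, callers must spell out the fully qualified types themselves (or add their own local aliases). Below is a minimal sketch, not part of this commit, of what a consuming translation unit might look like: the include path and the helper function are illustrative only, while the operator signature and the cadence::impl::HiFi::native namespace come from the header above.

```cpp
// Hypothetical include path for the header shown in this diff; adjust to
// wherever the header lives in your tree.
#include "executorch/backends/cadence/hifi/operators/operators.h"

namespace {

// Local aliases scoped to this .cpp, mirroring the ones the header used to
// export globally before this change.
using ::executorch::aten::Tensor;
using ::executorch::runtime::KernelRuntimeContext;

// Illustrative helper that forwards to the per-tensor quantized linear op
// declared in the header, using its fully qualified signature.
void run_quantized_linear_per_tensor(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    const Tensor& weight,
    const Tensor& bias,
    Tensor& out) {
  // Placeholder quantization parameters; in practice these come from the
  // lowered graph at runtime.
  constexpr int64_t in_zero_point = 0;
  constexpr int64_t weight_zero_point = 0;
  constexpr int64_t out_multiplier = 1073741824;  // ~0.5 in Q31 fixed point
  constexpr int64_t out_shift = 0;
  constexpr int64_t out_zero_point = 0;

  ::cadence::impl::HiFi::native::quantized_linear_per_tensor_out(
      ctx,
      in,
      weight,
      bias,
      in_zero_point,
      weight_zero_point,
      out_multiplier,
      out_shift,
      out_zero_point,
      /*offset=*/::executorch::aten::optional<Tensor>(),  // empty optional
      out);
}

}  // namespace
```

Keeping the aliases in the .cpp (or an anonymous namespace) rather than the header avoids leaking short names like Tensor into every file that includes it, which is the point of this cleanup.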