Skip to content

Commit dd7c05e

Browse files
committed
Address merge issues
Fix a merge issue causing the build to fail + update tests after merging #15590 Signed-off-by: Adrian Lundell <[email protected]>
1 parent 8dda27b commit dd7c05e

File tree

2 files changed

+23
-23
lines changed

2 files changed

+23
-23
lines changed

backends/cortex_m/ops/op_quantized_linear.cpp

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -21,16 +21,15 @@ Tensor& quantized_linear_out(
2121
KernelRuntimeContext& context,
2222
const Tensor& input,
2323
const Tensor& weights,
24-
const Tensor& weight_zero_point,
25-
const Tensor& weight_multiplier,
26-
const Tensor& weight_shift,
2724
const torch::executor::optional<Tensor>& bias,
28-
const Tensor& bias_multiplier,
29-
const Tensor& bias_shift,
30-
const Tensor& scratch_buffer,
31-
const Scalar& output_zero_point,
32-
const Scalar& in_features,
33-
const Scalar& out_features,
25+
const torch::executor::optional<Tensor>& kernel_sum,
26+
const Scalar& input_offset,
27+
const Scalar& filter_offset,
28+
const Scalar& output_offset,
29+
const IntArrayRef requantize_multipliers,
30+
const IntArrayRef requantize_shifts,
31+
const Scalar& activation_max,
32+
const Scalar& activation_min,
3433
Tensor& out) {
3534
ET_LOG(Info, "quantized_linear_out: called");
3635

backends/cortex_m/test/ops/test_add.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -59,17 +59,6 @@ class CortexMTensorAdd(Model):
5959
}
6060

6161

62-
class CortexMTensorAddBroadcast(Model):
63-
# TODO: Quantize and accelerate broadcasted adds
64-
ops_before_transforms = {
65-
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 1,
66-
}
67-
68-
ops_after_transforms = {
69-
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 1,
70-
}
71-
72-
7362
class CortexMAlphaAdd(ModelAlpha):
7463
ops_before_transforms = {
7564
"executorch_exir_dialects_edge__ops_aten_add_Tensor": 1,
@@ -126,15 +115,15 @@ class CortexMAlphaAdd(ModelAlpha):
126115
(torch.rand(2, 2) * 10, torch.rand(2, 2)),
127116
),
128117
"broadcast_1": McuTestCase(
129-
CortexMTensorAddBroadcast(),
118+
CortexMTensorAdd(),
130119
(torch.ones(1), torch.ones(2, 2, 2, 2)),
131120
),
132121
"broadcast_2": McuTestCase(
133-
CortexMTensorAddBroadcast(),
122+
CortexMTensorAdd(),
134123
(torch.ones((2, 1, 1, 1)), torch.ones(1)),
135124
),
136125
"broadcast_3": McuTestCase(
137-
CortexMTensorAddBroadcast(),
126+
CortexMTensorAdd(),
138127
(
139128
ramp_tensor(-2, 2, (2, 1, 2, 1)),
140129
ramp_tensor(-5, 5, (1, 2, 1, 2)),
@@ -183,6 +172,18 @@ def test_dialect_add(test_case):
183172
"'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
184173
AttributeError,
185174
),
175+
"broadcast_1": (
176+
" assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
177+
RuntimeError,
178+
),
179+
"broadcast_2": (
180+
" assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
181+
RuntimeError,
182+
),
183+
"broadcast_3": (
184+
" assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
185+
RuntimeError,
186+
),
186187
"alpha": (
187188
"Expecting kwargs for aten op IR to be empty - alpha arg not supported.",
188189
AssertionError,

0 commit comments

Comments
 (0)