Commit d0eea50

Update broadcast tests xfails

Signed-off-by: Adrian Lundell <[email protected]>
Change-Id: I6224ca5a890556ca9cf0929440f7aea8da1b5028

1 parent 1348f0c · commit d0eea50

File tree

3 files changed: +32 -46 lines

backends/cortex_m/ops/operators.py
Lines changed: 12 additions & 0 deletions

@@ -138,6 +138,9 @@ def quantized_add_meta(
     output_multiplier: int,
     output_shift: int,
 ) -> torch.Tensor:
+    assert (
+        self.shape == other.shape
+    ), "Broadcasting not yet supported in Cortex-M backend."
     broadcasted_shape = torch.broadcast_shapes(self.shape, other.shape)
     return torch.empty(broadcasted_shape, dtype=torch.int8, device=self.device)

@@ -156,6 +159,9 @@ def quantized_add_impl(
     output_multiplier: int,
     output_shift: int,
 ) -> torch.Tensor:
+    assert (
+        self.shape == other.shape
+    ), "Broadcasting not yet supported in Cortex-M backend."
     self_shifted = (self.to(torch.int32) - self_zero_point) << SHIFT_INT8
     self_fp = requantize_cmsis(self_shifted, self_multiplier, self_shift)

@@ -197,6 +203,9 @@ def quantized_mul_meta(
     output_shift: int,
 ) -> torch.Tensor:
     # Broadcast to output shape
+    assert (
+        self.shape == other.shape
+    ), "Broadcasting not yet supported in Cortex-M backend."
     broadcasted_shape = torch.broadcast_shapes(self.shape, other.shape)
     return torch.empty(broadcasted_shape, dtype=torch.int8, device=self.device)

@@ -214,6 +223,9 @@ def quantized_mul_impl(
     # CMSIS-NN kernel multiplies raw int8 tensors (after zero-point offset) and
     # only uses the output multiplier/shift for rescaling. Mirror that here to
     # keep the composite implementation numerically aligned with the backend.
+    assert (
+        self.shape == other.shape
+    ), "Broadcasting not yet supported in Cortex-M backend."
     self_int = self.to(torch.int32) - self_zero_point
     other_int = other.to(torch.int32) - other_zero_point
     result_fp = self_int * other_int
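The new guard rejects shape mismatches before torch.broadcast_shapes gets a chance to expand them. A minimal, self-contained sketch of that behaviour follows; the check_same_shape helper is purely illustrative and not part of the backend.

import torch

# Hypothetical stand-in for the assert added to the quantized add/mul ops:
# shapes must match exactly, even when PyTorch broadcasting would accept them.
def check_same_shape(self: torch.Tensor, other: torch.Tensor) -> None:
    assert (
        self.shape == other.shape
    ), "Broadcasting not yet supported in Cortex-M backend."

a = torch.ones(2, 2, 2, 2, dtype=torch.int8)
b = torch.ones(1, dtype=torch.int8)

# torch.broadcast_shapes still reports a common (2, 2, 2, 2) shape ...
print(torch.broadcast_shapes(a.shape, b.shape))

# ... but the composite ops now fail fast with a clear message instead of
# reaching the CMSIS-NN kernel's own size check at runtime.
try:
    check_same_shape(a, b)
except AssertionError as exc:
    print(exc)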

backends/cortex_m/test/ops/test_add.py
Lines changed: 6 additions & 31 deletions

@@ -139,7 +139,7 @@ class CortexMAlphaAdd(ModelAlpha):
 }


-dialect_xfails = {
+xfails = {
     "self_scalar": (
         "'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
         AttributeError,
@@ -152,46 +152,21 @@ class CortexMAlphaAdd(ModelAlpha):
         "Expecting kwargs for aten op IR to be empty - alpha arg not supported.",
         AssertionError,
     ),
+    "broadcast_1": "Broadcasting not yet supported in Cortex-M backend",
+    "broadcast_2": "Broadcasting not yet supported in Cortex-M backend",
+    "broadcast_3": "Broadcasting not yet supported in Cortex-M backend",
 }


-@parametrize("test_case", test_cases, xfails=dialect_xfails)
+@parametrize("test_case", test_cases, xfails=xfails)
 def test_dialect_add(test_case):
     tester = CortexMTester(test_case.model, test_case.example_inputs)
     tester.test_dialect(
         test_case.model.ops_before_transforms, test_case.model.ops_after_transforms
     )


-implementation_xfails = {
-    "self_scalar": (
-        "'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
-        AttributeError,
-    ),
-    "scalar_scalar": (
-        "'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
-        AttributeError,
-    ),
-    "broadcast_1": (
-        " assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
-        RuntimeError,
-    ),
-    "broadcast_2": (
-        " assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
-        RuntimeError,
-    ),
-    "broadcast_3": (
-        " assert failed (input1.sizes() == input2.sizes()): Input1 and Input2 must have the same sizes.",
-        RuntimeError,
-    ),
-    "alpha": (
-        "Expecting kwargs for aten op IR to be empty - alpha arg not supported.",
-        AssertionError,
-    ),
-}
-
-
-@parametrize("test_case", test_cases, xfails=implementation_xfails)
+@parametrize("test_case", test_cases, xfails=xfails)
 def test_implementation_add(test_case):
     tester = CortexMTester(test_case.model, test_case.example_inputs)
     tester.test_implementation()
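After this change a single xfails mapping drives both the dialect and implementation tests; each value is either a reason string or a (reason, expected_exception) tuple. As a rough sketch only (not the ExecuTorch/Arm parametrize helper, whose actual signature lives in the test utilities), a decorator of that shape could be built on top of plain pytest like this:

import pytest

# Sketch of a parametrize-style decorator that consumes an xfails mapping
# where values are either a reason string or a (reason, exception) tuple.
def parametrize_with_xfails(arg_name, cases, xfails=None):
    xfails = xfails or {}
    params = []
    for name, case in cases.items():
        marks = []
        if name in xfails:
            entry = xfails[name]
            reason, raises = entry if isinstance(entry, tuple) else (entry, None)
            kwargs = {"reason": reason}
            if raises is not None:
                kwargs["raises"] = raises
            marks.append(pytest.mark.xfail(**kwargs))
        params.append(pytest.param(case, id=name, marks=marks))
    return pytest.mark.parametrize(arg_name, params)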

backends/cortex_m/test/ops/test_mul.py
Lines changed: 14 additions & 15 deletions

@@ -59,16 +59,6 @@ class CortexMTensorMul(Model):
 }


-class CortexMTensorMulBroadCast(Model):
-    ops_before_transforms = {
-        "executorch_exir_dialects_edge__ops_aten_mul_Tensor": 1,
-    }
-
-    ops_after_transforms = {
-        "executorch_exir_dialects_edge__ops_aten_mul_Tensor": 1,
-    }
-
-
 test_cases = {
     "self_scalar": McuTestCase(
         CortexMSelfMul(),
@@ -107,15 +97,15 @@ class CortexMTensorMulBroadCast(Model):
         (1000.0, torch.ones(1)),
     ),
     "broadcast_1": McuTestCase(
-        CortexMTensorMulBroadCast(),
+        CortexMTensorMul(),
         (torch.ones(1), torch.ones(2, 2, 2, 2)),
     ),
     "broadcast_2": McuTestCase(
-        CortexMTensorMulBroadCast(),
+        CortexMTensorMul(),
         (torch.ones((2, 1, 1, 1)), torch.ones(1)),
     ),
     "broadcast_3": McuTestCase(
-        CortexMTensorMulBroadCast(),
+        CortexMTensorMul(),
         (
             ramp_tensor(-2, 2, (2, 1, 2, 1)),
             ramp_tensor(-5, 5, (1, 2, 1, 2)),
@@ -125,8 +115,17 @@ class CortexMTensorMulBroadCast(Model):


 xfail_cases = {
-    "self_scalar": "lift_constant_tensor_pass assumes fake tensors for scalars",
-    "scalar_scalar": "lift_constant_tensor_pass assumes fake tensors for scalars",
+    "self_scalar": (
+        "'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
+        AttributeError,
+    ),
+    "scalar_scalar": (
+        "'float' object has not attribute 'fake_mode' - scalar only ops not supported.",
+        AttributeError,
+    ),
+    "broadcast_1": "Broadcasting not yet supported in Cortex-M backend",
+    "broadcast_2": "Broadcasting not yet supported in Cortex-M backend",
+    "broadcast_3": "Broadcasting not yet supported in Cortex-M backend",
 }
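The broadcast cases now reuse the plain CortexMTensorMul model and are simply expected to fail until broadcasting lands. For context, a short eager-mode sketch of what "broadcast_3" exercises; ramp_tensor is a test utility, so comparable ramps are built here with torch.linspace instead.

import torch

# Operands with complementary singleton dimensions, as in the broadcast_3 case.
x = torch.linspace(-2, 2, steps=4).reshape(2, 1, 2, 1)
y = torch.linspace(-5, 5, steps=4).reshape(1, 2, 1, 2)

# Eager PyTorch broadcasts (2, 1, 2, 1) * (1, 2, 1, 2) to a (2, 2, 2, 2)
# result, which is exactly the pattern the Cortex-M kernels reject for now.
print(torch.mul(x, y).shape)  # torch.Size([2, 2, 2, 2])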
