
Commit e4b1d51

pytorchbot and Ninja91 authored
Arm backend: Add 16A8W FCNode support with BMM dependency fix (#14218)
This PR was created by the merge bot to help merge the original PR into the main branch.
ghstack PR number: #13801 by @Ninja91
^ Please use this as the source of truth for the PR details, comments, and reviews
ghstack PR base: https://github.com/pytorch/executorch/tree/gh/Ninja91/14/base
ghstack PR head: https://github.com/pytorch/executorch/tree/gh/Ninja91/14/head
Merge bot PR base: https://github.com/pytorch/executorch/tree/gh/Ninja91/13/orig
Merge bot PR head: https://github.com/pytorch/executorch/tree/gh/Ninja91/14/orig
@diff-train-skip-merge

Co-authored-by: Nitin Jain <[email protected]>
1 parent fe26bfd · commit e4b1d51

File tree: 3 files changed (+112 −2 lines)
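A note on the scheme named in the title: 16A8W is symmetric quantization with 16-bit activations and 8-bit weights (the configuration used in the tests below comes from get_symmetric_a16w8_quantization_config). The following is a hedged numeric sketch of why the wider activation grid matters; illustrative scale math only, not the backend's implementation.

# Hedged sketch of symmetric quantization scales for 16A8W; illustrative
# only, not the backend's implementation.
def symmetric_scale(max_abs: float, n_bits: int) -> float:
    """Scale for a symmetric signed n-bit quantizer (zero point fixed at 0)."""
    qmax = 2 ** (n_bits - 1) - 1  # 32767 for int16, 127 for int8
    return max_abs / qmax


act_scale = symmetric_scale(max_abs=4.0, n_bits=16)  # activations -> int16
w_scale = symmetric_scale(max_abs=0.5, n_bits=8)     # weights     -> int8

x = 1.2345
q = round(x / act_scale)   # int16 code for x
x_hat = q * act_scale      # dequantized value; error shrinks vs. an int8 grid
print(f"{q=} {x_hat=:.6f}")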

backends/arm/operators/op_bmm.py

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ def define_node(
         validate_valid_dtype(
             self.target,
             [*inputs, output],
-            [ts.DType.INT8, ts.DType.FP32],
+            [ts.DType.INT8, ts.DType.INT16, ts.DType.FP32],
             output.tosa_spec,
         )
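The one-line change above widens the dtype allow-list the BMM visitor validates against, so int16 activations can pass validation (addmm lowers through BMM, hence the "BMM dependency fix" in the title). Below is a minimal, self-contained sketch of the allow-list idea, with stand-in DType and Tensor types; only the name and argument order of validate_valid_dtype are taken from the diff.

# Illustrative re-implementation of the allow-list check; the DType and
# Tensor classes are stand-ins, not the TOSA serializer's actual types.
from dataclasses import dataclass
from enum import Enum, auto


class DType(Enum):  # stand-in for ts.DType
    INT8 = auto()
    INT16 = auto()
    FP32 = auto()


@dataclass
class Tensor:  # stand-in for a TOSA tensor argument
    name: str
    dtype: DType


def validate_valid_dtype(target, tensors, valid_dtypes, tosa_spec):
    """Raise if any tensor's dtype falls outside the allow-list."""
    for t in tensors:
        if t.dtype not in valid_dtypes:
            raise ValueError(
                f"{target}: {t.name} has dtype {t.dtype}, "
                f"expected one of {valid_dtypes} for {tosa_spec}"
            )


# After this commit, int16 activations pass BMM validation:
validate_valid_dtype(
    "aten.bmm.default",
    [Tensor("x", DType.INT16), Tensor("y", DType.INT8), Tensor("out", DType.INT16)],
    [DType.INT8, DType.INT16, DType.FP32],
    tosa_spec="TOSA-1.0+INT+int16",
)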

backends/arm/test/ops/test_addmm.py

Lines changed: 110 additions & 1 deletion
@@ -5,16 +5,23 @@
 
 from typing import Tuple
 
+import pytest
 import torch
+from executorch.backends.arm.quantizer.arm_quantizer import (
+    get_symmetric_a16w8_quantization_config,
+    TOSAQuantizer,
+)
 
-from executorch.backends.arm.test import common
+from executorch.backends.arm.test import common, conftest
 from executorch.backends.arm.test.tester.test_pipeline import (
     EthosU55PipelineINT,
     EthosU85PipelineINT,
     TosaPipelineFP,
     TosaPipelineINT,
     VgfPipeline,
 )
+from executorch.backends.arm.tosa.specification import TosaSpecification
+from executorch.backends.xnnpack.test.tester import Quantize
 
 aten_op = "torch.ops.aten.addmm.default"
 
@@ -182,3 +189,105 @@ def test_addmm_vgf_INT(test_data: input_t1):
         tosa_version="TOSA-1.0+INT",
     )
     pipeline.run()
+
+
+def get_symmetric_a16w8_addmm_quantizer(per_channel_quantization=False):
+    tosa_version = conftest.get_option("tosa_version")
+    tosa_profiles = {
+        "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT+int16"),
+    }
+
+    quantizer = TOSAQuantizer(tosa_profiles[tosa_version])
+    quantizer.set_global(
+        get_symmetric_a16w8_quantization_config(is_per_channel=per_channel_quantization)
+    )
+
+    return Quantize(
+        quantizer,
+        get_symmetric_a16w8_quantization_config(
+            is_per_channel=per_channel_quantization
+        ),
+    )
+
+
+@common.parametrize("test_data", test_data_suite)
+@pytest.mark.xfail(
+    reason="missing int16 addmm ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13979"
+)
+def test_addmm_16a8w_tosa_INT(test_data: input_t1):
+    """Test addmm (FC layer) operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = TosaPipelineINT[input_t1](
+        Addmm(),
+        (*test_data,),
+        aten_op=[],
+        exir_op=[],
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        tosa_extensions=["int16"],
+    )
+
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_addmm_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+@common.XfailIfNoCorstone300
+@pytest.mark.xfail(
+    reason="Vela compilation fails with 'Invalid arguments' for int16 addmm operations"
+)
+def test_addmm_16a8w_u55_INT16(test_data: input_t1):
+    """Test addmm (FC layer) operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = EthosU55PipelineINT[input_t1](
+        Addmm(),
+        (*test_data,),
+        aten_ops=[],
+        exir_ops=[],
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        run_on_fvp=True,
+    )
+
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_addmm_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+@common.XfailIfNoCorstone320
+@pytest.mark.xfail(
+    reason="Vela compilation fails with 'Invalid arguments' for int16 addmm operations"
+)
+def test_addmm_16a8w_u85_INT16(test_data: input_t1):
+    """Test addmm (FC layer) operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = EthosU85PipelineINT[input_t1](
+        Addmm(),
+        (*test_data,),
+        aten_ops=[],
+        exir_ops=[],
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        run_on_fvp=True,
+    )
+
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_addmm_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()
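All three new tests are currently expected failures: the TOSA reference model rejects int16 addmm and Vela fails to compile it, as the xfail reasons record. Below is a minimal illustration of the pytest.mark.xfail pattern they rely on (plain pytest behavior, nothing ExecuTorch-specific):

import pytest


# The test body still runs; a failure is reported as XFAIL instead of
# breaking CI, and the marker can be dropped (or strict=True added to
# catch unexpected passes) once backend support lands.
@pytest.mark.xfail(reason="int16 support pending in the backend")
def test_pending_feature():
    raise NotImplementedError("backend support pending")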

backends/arm/test/targets.bzl

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ def define_arm_tests():
     # Operators
     test_files += [
         "ops/test_add.py",
+        "ops/test_addmm.py",
         "ops/test_avg_pool2d.py",
         "ops/test_cat.py",
         "ops/test_linear.py",
