|
8 | 8 |
|
9 | 9 | from typing import Tuple |
10 | 10 |
|
11 | | -import pytest |
12 | | - |
13 | 11 | import torch |
14 | | -from executorch.backends.arm.test import common |
| 12 | +from executorch.backends.arm.quantizer.arm_quantizer import ( |
| 13 | + get_symmetric_a16w8_quantization_config, |
| 14 | + TOSAQuantizer, |
| 15 | +) |
| 16 | +from executorch.backends.arm.test import common, conftest |
15 | 17 |
|
16 | 18 | from executorch.backends.arm.test.tester.test_pipeline import ( |
17 | 19 | EthosU55PipelineINT, |
|
20 | 22 | TosaPipelineINT, |
21 | 23 | VgfPipeline, |
22 | 24 | ) |
| 25 | +from executorch.backends.arm.tosa_specification import TosaSpecification |
| 26 | +from executorch.backends.xnnpack.test.tester import Quantize |
23 | 27 |
|
24 | 28 | aten_op = "torch.ops.aten.linear.default" |
25 | 29 |
|
@@ -143,7 +147,6 @@ def test_linear_tosa_FP(test_data: torch.Tensor): |
143 | 147 | pipeline.run() |
144 | 148 |
|
145 | 149 |
|
146 | | -@pytest.mark.flaky(reruns=5) # TODO: Investigate flakyness. |
147 | 150 | @common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT) |
148 | 151 | def test_linear_tosa_INT(test_data: torch.Tensor): |
149 | 152 | test_data, out_features, has_bias, per_channel_quantization = test_data() |
@@ -258,3 +261,61 @@ def test_linear_vgf_INT(test_data: torch.Tensor): |
258 | 261 | per_channel_quantization=per_channel_quantization, |
259 | 262 | ) |
260 | 263 | pipeline.run() |
| 264 | + |
| 265 | + |
def get_symmetric_a16w8_linear_quantizer(
    u55_config=False, per_channel_quantization=False
):
    """Build a Quantize stage for 16A8W (int16 activations, int8 weights) linear ops.

    Args:
        u55_config: Accepted for interface parity with other quantizer helpers;
            currently unused in this function.
        per_channel_quantization: If True, weights are quantized per channel.

    Returns:
        A ``Quantize`` stage wrapping a ``TOSAQuantizer`` with the symmetric
        a16w8 config applied both globally and to ``torch.nn.Linear`` modules.

    Raises:
        KeyError: If the configured ``tosa_version`` has no int16-capable
            TOSA profile registered here.
    """
    tosa_version = conftest.get_option("tosa_version")
    tosa_profiles = {
        "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT+int16"),
    }

    # Build the shared a16w8 config once instead of three identical calls.
    quantization_config = get_symmetric_a16w8_quantization_config(
        is_per_channel=per_channel_quantization
    )

    quantizer = TOSAQuantizer(tosa_profiles[tosa_version])
    quantizer.set_global(quantization_config)
    quantizer.set_module_type(torch.nn.Linear, quantization_config)

    return Quantize(quantizer, quantization_config)
| 291 | + |
| 292 | + |
# NOTE: use the `|` union of the two data dicts, matching test_linear_tosa_INT.
# Passing test_data_rank4_INT as a separate positional argument would bind it
# to the decorator's next parameter instead of adding parametrize cases.
@common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT)
def test_linear_16a8w_tosa_INT(test_data: torch.Tensor):
    """Test linear operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
    test_data, out_features, has_bias, per_channel_quantization = test_data()
    in_features = test_data.shape[-1]

    # Create pipeline with the int16 TOSA extension enabled so the custom
    # 16A8W quantization config is representable.
    pipeline = TosaPipelineINT[input_t1](
        Linear(
            in_features=in_features,
            out_features=out_features,
            bias=has_bias,
        ),
        (test_data,),
        aten_op,
        exir_op=[],
        per_channel_quantization=per_channel_quantization,
        use_to_edge_transform_and_lower=True,
        tosa_extensions=["int16"],
    )

    # Replace the default quantize stage with the a16w8 linear quantizer.
    pipeline.change_args(
        "quantize",
        get_symmetric_a16w8_linear_quantizer(
            per_channel_quantization=per_channel_quantization
        ),
    )
    # Run the pipeline
    pipeline.run()
0 commit comments