 import pytest
 import torch
 from executorch.backends.arm.quantizer import arm_quantizer
+from executorch.backends.arm.quantizer.arm_quantizer import (
+    get_symmetric_a16w8_quantization_config,
+    TOSAQuantizer,
+)
 from executorch.backends.arm.test import common, conftest
 from executorch.backends.arm.test.tester.test_pipeline import (
     EthosU55PipelineINT,
@@ -235,3 +239,105 @@ def test_add_tensor_vgf_INT(test_data: input_t1):
         pipeline.run()
     except FileNotFoundError as e:
         pytest.skip(f"VKML executor_runner not found - not built - skip {e}")
+
+
+def get_symmetric_a16w8_add_quantizer(per_channel_quantization=False):
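+    """Build a Quantize stage using the symmetric a16w8 config.
+
+    16-bit activations, 8-bit weights, per-tensor by default; requires the
+    TOSA int16 extension.
+    """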
+    tosa_version = conftest.get_option("tosa_version")
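+    # Map the requested TOSA version to a spec string with the int16 extension enabled.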
+    tosa_profiles = {
+        "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT+int16"),
+    }
+
+    quantizer = TOSAQuantizer(tosa_profiles[tosa_version])
+    quantizer.set_global(
+        get_symmetric_a16w8_quantization_config(is_per_channel=per_channel_quantization)
+    )
+
+    return Quantize(
+        quantizer,
+        get_symmetric_a16w8_quantization_config(
+            is_per_channel=per_channel_quantization
+        ),
+    )
+
+
+@common.parametrize("test_data", Add.test_data)
+@pytest.mark.xfail(
+    reason="missing int16 add ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13730"
+)
+def test_add_tensor_16a8w_tosa_INT(test_data: input_t1):
+    """Test add operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = TosaPipelineINT[input_t1](
+        Add(),
+        test_data(),
+        aten_op,
+        exir_op=[],
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        tosa_extensions=["int16"],
+    )
+
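+    # Swap the pipeline's default quantize stage for the a16w8 quantizer.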
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_add_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", Add.test_data)
+@common.XfailIfNoCorstone300
+@pytest.mark.xfail(
+    reason="Vela compilation fails with 'Invalid arguments' for int16 add operations. See: https://github.com/pytorch/executorch/issues/13730"
+)
+def test_add_tensor_16a8w_u55_INT16(test_data: input_t1):
+    """Test add operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = EthosU55PipelineINT[input_t1](
+        Add(),
+        test_data(),
+        aten_op,
+        exir_op,
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        run_on_fvp=True,
+    )
+
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_add_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", Add.test_data)
+@common.XfailIfNoCorstone320
+@pytest.mark.xfail(
+    reason="Vela compilation fails with 'Invalid arguments' for int16 add operations. See: https://github.com/pytorch/executorch/issues/13730"
+)
+def test_add_tensor_16a8w_u85_INT16(test_data: input_t1):
+    """Test add operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
+    per_channel_quantization = False
+
+    pipeline = EthosU85PipelineINT[input_t1](
+        Add(),
+        test_data(),
+        aten_op,
+        exir_op,
+        per_channel_quantization=per_channel_quantization,
+        use_to_edge_transform_and_lower=True,
+        run_on_fvp=True,
+    )
+
+    pipeline.change_args(
+        "quantize",
+        get_symmetric_a16w8_add_quantizer(
+            per_channel_quantization=per_channel_quantization
+        ),
+    )
+    pipeline.run()