Skip to content

Commit aafcede

Browse files
committed
Arm backend: Enable linear 16a8w tests
Enable tests of int16 activation and int8 weight quantization. The test for large_rand is disabled while we sort out why it is flaky. Signed-off-by: Per Åstrand <[email protected]> Change-Id: I9de5d472f8862edebcf82c140399985db930c069
1 parent 2bd09f9 commit aafcede

File tree

2 files changed

+11
-4
lines changed

2 files changed

+11
-4
lines changed

backends/arm/scripts/parse_test_names.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,9 @@ def parse_test_name(
9595
op = op.removesuffix("_1d")
9696
op = op.removesuffix("_2d")
9797

98+
# Remove suffix for 16 bit activation and 8 bit weight test cases
99+
op = op.removesuffix("_16a8w")
100+
98101
assert target != "None", f"{test_name} does not contain one of {TARGETS}"
99102
assert (
100103
op in op_name_map.keys()

backends/arm/test/ops/test_linear.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -277,10 +277,14 @@ def get_symmetric_a16w8_linear_quantizer(
277277
)
278278

279279

280-
@common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT)
281-
@pytest.mark.xfail(
282-
reason="missing int16 linear ops support; fails at TOSA reference model run with Invalid TOSA graph"
283-
)
280+
test_data_all_16a8w = test_data_rank1_INT | test_data_rank4_INT
281+
# TODO: Remove large rand test as they are flaky until sorted out why: MLETORCH-1377
282+
for k in list(test_data_all_16a8w.keys()):
283+
if "large_rand" in k:
284+
test_data_all_16a8w.pop(k)
285+
286+
287+
@common.parametrize("test_data", test_data_all_16a8w)
284288
def test_linear_16a8w_tosa_INT(test_data: torch.Tensor):
285289
"""Test linear operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
286290
test_data, out_features, has_bias, per_channel_quantization = test_data()

0 commit comments

Comments (0)