Skip to content

Commit 3ea5560

Browse files
committed
Adjust tolerance for quantized XNN conv1d tests
1 parent ce93291 commit 3ea5560

File tree

1 file changed

+7
-3
lines changed

1 file changed

+7
-3
lines changed

backends/xnnpack/test/ops/test_conv1d.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -122,9 +122,13 @@ def _test_conv1d(
122122
# For some tests we want to skip to_executorch because otherwise it will require the
123123
# quantized operators to be loaded and we don't want to do that in the test.
124124
if not skip_to_executorch:
125-
tester.to_executorch().serialize().run_method_and_compare_outputs(
126-
num_runs=10, atol=0.01, rtol=0.01
127-
)
125+
tester.to_executorch().serialize()
126+
if quantized:
127+
tester.run_method_and_compare_outputs(
128+
num_runs=10, atol=0.025, rtol=0.01
129+
)
130+
else:
131+
tester.run_method_and_compare_outputs()
128132

129133
def test_fp16_conv1d(self):
130134
inputs = (torch.randn(2, 2, 4).to(torch.float16),)

0 commit comments

Comments (0)