We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ce93291 · commit 3ea5560 — Copy full SHA for 3ea5560
backends/xnnpack/test/ops/test_conv1d.py
@@ -122,9 +122,13 @@ def _test_conv1d(
         # For some tests we want to skip to_executorch because otherwise it will require the
         # quantized operators to be loaded and we don't want to do that in the test.
         if not skip_to_executorch:
-            tester.to_executorch().serialize().run_method_and_compare_outputs(
-                num_runs=10, atol=0.01, rtol=0.01
-            )
+            tester.to_executorch().serialize()
+            if quantized:
+                tester.run_method_and_compare_outputs(
+                    num_runs=10, atol=0.025, rtol=0.01
+                )
+            else:
+                tester.run_method_and_compare_outputs()
 
     def test_fp16_conv1d(self):
         inputs = (torch.randn(2, 2, 4).to(torch.float16),)
0 commit comments