Skip to content

Commit ea773b1

Browse files
committed
[Backend Tester] Add backend operator test suite skeleton
1 parent f9a3ca8 commit ea773b1

28 files changed

+1240
-3
lines changed

backends/test/harness/tester.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -366,11 +366,11 @@ def _assert_outputs_equal(model_output, ref_output, atol=1e-03, rtol=1e-03):
366366
f"Output {i} does not match reference output.\n"
367367
f"\tGiven atol: {atol}, rtol: {rtol}.\n"
368368
f"\tOutput tensor shape: {model.shape}, dtype: {model.dtype}\n"
369-
f"\tDifference: max: {torch.max(model-ref)}, abs: {torch.max(torch.abs(model-ref))}, mean abs error: {torch.mean(torch.abs(model-ref))}.\n"
369+
f"\tDifference: max: {torch.max(model-ref)}, abs: {torch.max(torch.abs(model-ref))}, mean abs error: {torch.mean(torch.abs(model-ref).to(torch.double))}.\n"
370370
f"\t-- Model vs. Reference --\n"
371371
f"\t Numel: {model.numel()}, {ref.numel()}\n"
372372
f"\tMedian: {model.median()}, {ref.median()}\n"
373-
f"\t Mean: {model.mean()}, {ref.mean()}\n"
373+
f"\t Mean: {model.to(torch.double).mean()}, {ref.to(torch.double).mean()}\n"
374374
f"\t Max: {model.max()}, {ref.max()}\n"
375375
f"\t Min: {model.min()}, {ref.min()}\n"
376376
)

backends/test/suite/README.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Backend Test Suite
2+
3+
This directory contains tests that validate correctness and coverage of backends. These tests are written such that the backend is treated as a black box. The test logic verifies that the backend is able to handle a given pattern without erroring out (declining to partition the pattern is acceptable) and is able to run the graphs and yield reasonable outputs. As backends may differ significantly in implementation, numerical bounds are intentionally left loose.
4+
5+
## Backend Registration
6+
7+
To plug into the test framework, each backend should provide an implementation of the Tester class, defined in backends/test/harness/tester.py. Backends can provide implementations of each stage, or use the default implementation, as appropriate.
8+
9+
At a minimum, the backend will likely need to provide a custom implementation of the Partition and ToEdgeTransformAndLower stages using the appropriate backend partitioner. See backends/xnnpack/test/tester/tester.py for an example implementation.
10+
11+
Once a tester is available, the backend flow(s) can be added in __init__.py in this directory by adding an entry to `ALL_TEST_FLOWS`. Each flow entry consists of a name (used in the test case naming) and a function to instantiate a tester for a given model and input tuple.
12+
13+
## Test Cases
14+
15+
Operator test cases are defined under the operators/ directory. Tests are written in a backend-independent manner, and each test is programmatically expanded to generate a variant for each registered backend flow. The `@operator_test` decorator is applied to each test class to trigger this behavior. Tests can also be tagged with an appropriate type specifier, such as `@dtype_test`, to generate variants for each dtype. The decorators and "magic" live in __init__.py in this directory.

backends/test/suite/TARGETS

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
load(":targets.bzl", "define_common_targets")
2+
3+
define_common_targets(is_fbcode = True)

backends/test/suite/__init__.py

Lines changed: 173 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,173 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
import logging
11+
import os
12+
import unittest
13+
14+
from enum import Enum
15+
from typing import Any, Callable, Tuple
16+
17+
import torch
18+
from executorch.backends.test.harness import Tester
19+
20+
logger = logging.getLogger(__name__)
21+
logger.setLevel(logging.INFO)
22+
23+
24+
# Read enabled backends from the environment variable. Enable all if
# not specified (signalled by None).
def get_enabled_backends():
    """Return the list of enabled backend names, or None meaning "all".

    The ET_TEST_ENABLED_BACKENDS environment variable holds a
    comma-separated list of backend names. Whitespace around names is
    stripped and empty segments are dropped, so "xnnpack, coreml" parses
    the same as "xnnpack,coreml".
    """
    et_test_backends = os.environ.get("ET_TEST_ENABLED_BACKENDS")
    if et_test_backends is None:
        return None
    # Strip whitespace and skip empty entries (e.g. a trailing comma) so
    # that membership checks in is_backend_enabled() match exact names.
    return [name.strip() for name in et_test_backends.split(",") if name.strip()]
32+
33+
34+
# Cached result of the environment lookup; computed once at import time.
# None means "all backends enabled".
_ENABLED_BACKENDS = get_enabled_backends()
35+
36+
37+
def is_backend_enabled(backend):
    """Report whether test variants should be generated for *backend*.

    When no filter is configured (ET_TEST_ENABLED_BACKENDS unset), every
    backend is considered enabled.
    """
    return _ENABLED_BACKENDS is None or backend in _ENABLED_BACKENDS
42+
43+
44+
# Registered backend test flows. Each entry is a (name, tester_factory)
# pair; the name is appended to generated test case names.
ALL_TEST_FLOWS = []

# Backend testers are imported conditionally so that disabled backends do
# not need to be importable in the current environment.
if is_backend_enabled("xnnpack"):
    from executorch.backends.xnnpack.test.tester import Tester as XnnpackTester

    XNNPACK_TEST_FLOW = ("xnnpack", XnnpackTester)
    ALL_TEST_FLOWS.append(XNNPACK_TEST_FLOW)

if is_backend_enabled("coreml"):
    from executorch.backends.apple.coreml.test.tester import CoreMLTester

    COREML_TEST_FLOW = ("coreml", CoreMLTester)
    ALL_TEST_FLOWS.append(COREML_TEST_FLOW)
57+
58+
59+
# Dtypes that @dtype_test tests are expanded over. Backends need not
# support every entry; per the suite README, it is acceptable for a
# backend to simply not partition an unsupported pattern.
DTYPES = [
    torch.int8,
    torch.uint8,
    torch.int16,
    torch.uint16,
    torch.int32,
    torch.uint32,
    torch.int64,
    torch.uint64,
    torch.float16,
    torch.float32,
    torch.float64,
]

# Floating-point subset of DTYPES, for tests that only apply to floats.
FLOAT_DTYPES = [
    torch.float16,
    torch.float32,
    torch.float64,
]
78+
79+
80+
class TestType(Enum):
    """The type of a test function; controls generation and signature.

    STANDARD tests are run as-is. DTYPE tests get one generated variant
    per supported dtype and take the dtype as an additional parameter.
    """

    STANDARD = 1
    DTYPE = 2
86+
87+
88+
# Function annotation for dtype tests. This instructs the test framework to run the test
# for each supported dtype and to pass dtype as a test parameter.
def dtype_test(func):
    """Mark *func* for expansion into one variant per dtype in DTYPES.

    Generated variants receive the dtype as an extra positional argument
    (before the tester factory).
    """
    func.test_type = TestType.DTYPE
    return func
93+
94+
95+
# Class annotation for operator tests. This triggers the test framework to register
# the tests.
def operator_test(cls):
    """Class decorator: expand and register every test_* method on *cls*."""
    _create_tests(cls)
    return cls
100+
101+
102+
# Generate test cases for each backend flow.
def _create_tests(cls):
    """Expand every test_* method on *cls* into per-flow variants."""
    # Snapshot the names first: _expand_test mutates the class.
    test_names = [name for name in dir(cls) if name.startswith("test_")]
    for name in test_names:
        _expand_test(cls, name)
107+
108+
109+
# Expand a test into variants for each registered flow.
def _expand_test(cls, test_name: str):
    """Replace *test_name* on *cls* with one variant per registered flow."""
    original = getattr(cls, test_name)
    for flow_name, tester_factory in ALL_TEST_FLOWS:
        _create_test_for_backend(cls, original, flow_name, tester_factory)
    # Drop the unexpanded original so unittest does not collect it directly.
    delattr(cls, test_name)
115+
116+
117+
def _make_wrapped_test(test_func, *args, **kwargs):
118+
def wrapped_test(self):
119+
test_func(self, *args, **kwargs)
120+
121+
return wrapped_test
122+
123+
124+
def _make_wrapped_dtype_test(test_func, dtype, tester_factory):
125+
def wrapped_test(self):
126+
test_func(self, dtype, tester_factory)
127+
128+
return wrapped_test
129+
130+
131+
def _create_test_for_backend(
132+
cls,
133+
test_func: Callable,
134+
flow_name: str,
135+
tester_factory: Callable[[torch.nn.Module, Tuple[Any]], Tester],
136+
):
137+
test_type = getattr(test_func, "test_type", TestType.STANDARD)
138+
139+
if test_type == TestType.STANDARD:
140+
wrapped_test = _make_wrapped_test(test_func, tester_factory)
141+
test_name = f"{test_func.__name__}_{flow_name}"
142+
setattr(cls, test_name, wrapped_test)
143+
elif test_type == TestType.DTYPE:
144+
for dtype in DTYPES:
145+
# wrapped_test = _make_wrapped_dtype_test(test_func, dtype, tester_factory)
146+
wrapped_test = _make_wrapped_test(test_func, dtype, tester_factory)
147+
dtype_name = str(dtype)[6:] # strip "torch."
148+
test_name = f"{test_func.__name__}_{dtype_name}_{flow_name}"
149+
setattr(cls, test_name, wrapped_test)
150+
else:
151+
raise NotImplementedError(f"Unknown test type {test_type}.")
152+
153+
154+
class OperatorTest(unittest.TestCase):
    """Base class for operator tests; provides the shared test driver."""

    def _test_op(self, model, inputs, tester_factory):
        # Export and lower the model through the backend's tester first;
        # runtime verification only makes sense if the backend actually
        # claimed (delegated) the graph.
        tester = (
            tester_factory(
                model,
                inputs,
            )
            .export()
            .to_edge_transform_and_lower()
        )

        # Scan the lowered graph for a call into a backend delegate.
        # NOTE(review): compares node targets against
        # torch._higher_order_ops.executorch_call_delegate — confirm this is
        # the module path the lowered graph actually uses for delegate calls.
        is_delegated = any(
            n.target == torch._higher_order_ops.executorch_call_delegate
            for n in tester.stages[tester.cur].graph_module.graph.nodes
            if n.op == "call_function"
        )

        # Only run the runtime test if the op was delegated.
        if is_delegated:
            (tester.to_executorch().serialize().run_method_and_compare_outputs())
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
from typing import Callable
11+
12+
import torch
13+
14+
from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
15+
16+
17+
class Model(torch.nn.Module):
    """Element-wise sum of two tensors (default alpha)."""

    def forward(self, x, y):
        return torch.add(x, y)
20+
21+
22+
class ModelAlpha(torch.nn.Module):
    """Addition with an explicit scaling factor: x + alpha * y.

    Uses torch.add's alpha= keyword directly so the exported graph carries
    the alpha argument (rather than a separate multiply).
    """

    def __init__(self, alpha):
        super().__init__()
        # Scale applied to the second operand.
        self.alpha = alpha

    def forward(self, x, y):
        return torch.add(x, y, alpha=self.alpha)
29+
30+
31+
@operator_test
class Add(OperatorTest):
    """Backend-independent operator tests for element-wise add."""

    @dtype_test
    def test_add_dtype(self, dtype, tester_factory: Callable) -> None:
        # Scale to [0, 100) so integer dtypes see non-trivial values after cast.
        self._test_op(
            Model(),
            (
                (torch.rand(2, 10) * 100).to(dtype),
                (torch.rand(2, 10) * 100).to(dtype),
            ),
            tester_factory,
        )

    def test_add_f32_bcast_first(self, tester_factory: Callable) -> None:
        # First operand is broadcast up to the second's shape.
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 5, 1, 5),
            ),
            tester_factory,
        )

    def test_add_f32_bcast_second(self, tester_factory: Callable) -> None:
        # Second operand is broadcast up to the first's shape.
        self._test_op(
            Model(),
            (
                torch.randn(4, 4, 2, 7),
                torch.randn(2, 7),
            ),
            tester_factory,
        )

    def test_add_f32_bcast_unary(self, tester_factory: Callable) -> None:
        # Broadcast across size-1 leading dimensions.
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 1, 5),
            ),
            tester_factory,
        )

    def test_add_f32_alpha(self, tester_factory: Callable) -> None:
        # Exercise the alpha= argument of torch.add.
        self._test_op(
            ModelAlpha(alpha=2),
            (
                torch.randn(1, 25),
                torch.randn(1, 25),
            ),
            tester_factory,
        )
Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
10+
from typing import Callable, Optional
11+
12+
import torch
13+
14+
from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
15+
16+
17+
class Model(torch.nn.Module):
    """Element-wise true division of two tensors."""

    def forward(self, x, y):
        return torch.div(x, y)
20+
21+
22+
class ModelWithRounding(torch.nn.Module):
    """Division with an explicit rounding mode ("trunc", "floor", or None)."""

    def __init__(self, rounding_mode: Optional[str]):
        super().__init__()
        # Passed through to torch.div's rounding_mode= keyword.
        self.rounding_mode = rounding_mode

    def forward(self, x, y):
        return torch.div(x, y, rounding_mode=self.rounding_mode)
29+
30+
31+
@operator_test
class Divide(OperatorTest):
    """Backend-independent operator tests for element-wise division."""

    @dtype_test
    def test_divide_dtype(self, dtype, tester_factory: Callable) -> None:
        # Divisor is in [1, 101) so it stays nonzero even after truncation
        # to an integer dtype. (The previous "+ 0.1" offset rounded down to
        # 0 for values below 1 when cast to an integer dtype, which did not
        # actually prevent division by zero.)
        self._test_op(
            Model(),
            (
                (torch.rand(2, 10) * 100).to(dtype),
                (torch.rand(2, 10) * 100 + 1).to(dtype),
            ),
            tester_factory,
        )

    def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None:
        # First operand is broadcast up to the second's shape.
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 5, 1, 5).abs()
                + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory,
        )

    def test_divide_f32_bcast_second(self, tester_factory: Callable) -> None:
        # Second operand is broadcast up to the first's shape.
        self._test_op(
            Model(),
            (
                torch.randn(4, 4, 2, 7),
                torch.randn(2, 7).abs()
                + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory,
        )

    def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None:
        # Broadcast across size-1 leading dimensions.
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 1, 5).abs()
                + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory,
        )

    def test_divide_f32_trunc(self, tester_factory: Callable) -> None:
        # rounding_mode="trunc" rounds the quotient toward zero.
        self._test_op(
            ModelWithRounding(rounding_mode="trunc"),
            (
                torch.randn(3, 4) * 10,
                torch.randn(3, 4).abs()
                + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory,
        )

    def test_divide_f32_floor(self, tester_factory: Callable) -> None:
        # rounding_mode="floor" rounds the quotient toward negative infinity.
        self._test_op(
            ModelWithRounding(rounding_mode="floor"),
            (
                torch.randn(3, 4) * 10,
                torch.randn(3, 4).abs()
                + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory,
        )

0 commit comments

Comments
 (0)