Commit 9653a05

Add compliance suite skeleton and operator tests
ghstack-source-id: fbf42cf ghstack-comment-id: 3003538522 Pull-Request: #11960
1 parent 4b9d9eb commit 9653a05

9 files changed: +475 −0 lines changed

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
# Operator Compliance Test Suite

This directory contains operator tests that all backends are expected to pass. While not every backend will implement every operator or permutation, the expectation is that backend partitioners will only partition nodes that the backend can support. The partitioner should never error out due to not supporting an input node.

## Backend Registration

To plug into the test framework, each backend should provide an implementation of the Tester class, defined in backends/test/harness/tester.py. Backends can provide implementations of each stage, or use the default implementation, as appropriate.

At a minimum, the backend will likely need to provide a custom implementation of the Partition and ToEdgeTransformAndLower stages using the appropriate backend partitioner. See backends/xnnpack/test/tester/tester.py for an example implementation.

Once a tester is available, the backend flow(s) can be added in __init__.py in this directory by adding an entry to `ALL_TEST_FLOWS`. Each flow entry consists of a name (used in test case naming) and a function to instantiate a tester for a given model and input tuple, as sketched below.

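A minimal registration sketch, mirroring the existing XNNPACK and Core ML entries; the `my_backend` flow name and the `MyBackendTester` import are hypothetical placeholders, not part of this commit:

```python
# Hypothetical registration in backends/test/compliance_suite/__init__.py.
if is_backend_enabled("my_backend"):
    # A real backend would import its own Tester subclass here.
    from executorch.backends.my_backend.test.tester import MyBackendTester

    # Each flow is a (name, tester_factory) pair; the factory is called with
    # (module, example_inputs) to construct a tester for that backend.
    MY_BACKEND_TEST_FLOW = ("my_backend", MyBackendTester)
    ALL_TEST_FLOWS.append(MY_BACKEND_TEST_FLOW)
```
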
## Test Cases

Operator test cases are defined under the operators/ directory. Tests are written in a backend-independent manner, and each test is programmatically expanded to generate a variant for each registered backend flow. The `@operator_test` decorator is applied to each test class to trigger this behavior. Tests can also be tagged with an appropriate type specifier, such as `@dtype_test`, to generate variants for each dtype. The decorators and "magic" live in __init__.py in this directory.
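
For illustration, a test file might look like the sketch below (the `Neg` operator, class names, and shapes are hypothetical placeholders, not part of this commit). With the XNNPACK flow registered, the decorators expand it into generated methods such as `test_neg_dtype_float32_xnnpack` and `test_neg_f32_rank4_xnnpack`:

```python
import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)

class Negate(torch.nn.Module):
    def forward(self, x):
        return -x

@operator_test
class Neg(OperatorTest):
    @dtype_test
    def test_neg_dtype(self, dtype, tester_factory) -> None:
        # Expanded once per dtype per registered flow.
        self._test_op(Negate(), ((torch.rand(2, 10) * 100).to(dtype),), tester_factory)

    def test_neg_f32_rank4(self, tester_factory) -> None:
        # Expanded once per registered flow.
        self._test_op(Negate(), (torch.randn(1, 3, 8, 8),), tester_factory)
```
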
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
load(":targets.bzl", "define_common_targets")

define_common_targets(is_fbcode = True)
Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
import os
import unittest

from enum import Enum
from typing import Any, Callable, Tuple

import logging
import torch
from executorch.backends.test.harness import Tester

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


# Read enabled backends from the environment variable. Enable all if
# not specified (signalled by None).
def get_enabled_backends():
    et_test_backends = os.environ.get("ET_TEST_BACKENDS")
    if et_test_backends is not None:
        return et_test_backends.split(",")
    else:
        return None

_ENABLED_BACKENDS = get_enabled_backends()

def is_backend_enabled(backend):
    if _ENABLED_BACKENDS is None:
        return True
    else:
        return backend in _ENABLED_BACKENDS

ALL_TEST_FLOWS = []

if is_backend_enabled("xnnpack"):
    from executorch.backends.xnnpack.test.tester import Tester as XnnpackTester

    XNNPACK_TEST_FLOW = ("xnnpack", XnnpackTester)
    ALL_TEST_FLOWS.append(XNNPACK_TEST_FLOW)

if is_backend_enabled("coreml"):
    from executorch.backends.apple.coreml.test.tester import CoreMLTester

    COREML_TEST_FLOW = ("coreml", CoreMLTester)
    ALL_TEST_FLOWS.append(COREML_TEST_FLOW)

DTYPES = [
    torch.int8,
    torch.uint8,
    torch.int16,
    torch.uint16,
    torch.int32,
    torch.uint32,
    torch.int64,
    torch.uint64,
    torch.float16,
    torch.float32,
    torch.float64,
]

class TestType(Enum):
    STANDARD = 1
    DTYPE = 2

def dtype_test(func):
    # Tag a test method so it is expanded once per dtype in DTYPES.
    setattr(func, "test_type", TestType.DTYPE)
    return func

def operator_test(cls):
    # Class decorator: expand each test_* method into per-backend (and per-dtype)
    # variants, removing the original backend-independent method.
    _create_tests(cls)
    return cls

def _create_tests(cls):
    for key in dir(cls):
        if key.startswith("test_"):
            _expand_test(cls, key)

def _expand_test(cls, test_name: str):
    test_func = getattr(cls, test_name)
    for (flow_name, tester_factory) in ALL_TEST_FLOWS:
        _create_test_for_backend(cls, test_func, flow_name, tester_factory)
    delattr(cls, test_name)

def _create_test_for_backend(
    cls,
    test_func: Callable,
    flow_name: str,
    tester_factory: Callable[[torch.nn.Module, Tuple[Any]], Tester],
):
    test_type = getattr(test_func, "test_type", TestType.STANDARD)

    if test_type == TestType.STANDARD:
        def wrapped_test(self):
            test_func(self, tester_factory)

        test_name = f"{test_func.__name__}_{flow_name}"
        setattr(cls, test_name, wrapped_test)
    elif test_type == TestType.DTYPE:
        for dtype in DTYPES:
            # Bind dtype as a default argument so each generated test keeps its
            # own dtype rather than the loop's final value (late-binding closure).
            def wrapped_test(self, dtype=dtype):
                test_func(self, dtype, tester_factory)

            dtype_name = str(dtype)[6:]  # strip "torch."
            test_name = f"{test_func.__name__}_{dtype_name}_{flow_name}"
            setattr(cls, test_name, wrapped_test)
    else:
        raise NotImplementedError(f"Unknown test type {test_type}.")

class OperatorTest(unittest.TestCase):
    def _test_op(self, model, inputs, tester_factory):
        # Lower the model through the backend-specific tester.
        tester = (
            tester_factory(
                model,
                inputs,
            )
            .export()
            .to_edge_transform_and_lower()
        )

        # Check whether any node was delegated to the backend.
        is_delegated = any(
            n.target == torch._higher_order_ops.executorch_call_delegate
            for n in tester.stages[tester.cur].graph_module.graph.nodes
            if n.op == "call_function"
        )

        # Only run the runtime test if the op was delegated.
        if is_delegated:
            (
                tester
                .to_executorch()
                .serialize()
                .run_method_and_compare_outputs()
            )
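
A note on the ET_TEST_BACKENDS filter read at the top of this file (not part of the diff): the variable is read once at import time, so it must be set before the suite is imported. A small sketch of the resulting behavior:

```python
import os

# Comma-separated allow-list; must be set before importing the suite.
os.environ["ET_TEST_BACKENDS"] = "xnnpack"

from executorch.backends.test import compliance_suite

assert compliance_suite.is_backend_enabled("xnnpack")
assert not compliance_suite.is_backend_enabled("coreml")
# Leaving the variable unset enables every available backend flow.
```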

backends/test/compliance_suite/operators/__init__.py

Whitespace-only changes.
Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)

class Model(torch.nn.Module):
    def forward(self, x, y):
        return x + y

class ModelAlpha(torch.nn.Module):
    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha

    def forward(self, x, y):
        return torch.add(x, y, alpha=self.alpha)

@operator_test
class Add(OperatorTest):
    @dtype_test
    def test_add_dtype(self, dtype, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                (torch.rand(2, 10) * 100).to(dtype),
                (torch.rand(2, 10) * 100).to(dtype),
            ),
            tester_factory)

    def test_add_f32_bcast_first(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 5, 1, 5),
            ),
            tester_factory)

    def test_add_f32_bcast_second(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(4, 4, 2, 7),
                torch.randn(2, 7),
            ),
            tester_factory)

    def test_add_f32_bcast_unary(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 1, 5),
            ),
            tester_factory)

    def test_add_f32_alpha(self, tester_factory: Callable) -> None:
        self._test_op(
            ModelAlpha(alpha=2),
            (
                torch.randn(1, 25),
                torch.randn(1, 25),
            ),
            tester_factory)
Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable, Optional

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)

class Model(torch.nn.Module):
    def forward(self, x, y):
        return x / y

class ModelWithRounding(torch.nn.Module):
    def __init__(self, rounding_mode: Optional[str]):
        super().__init__()
        self.rounding_mode = rounding_mode

    def forward(self, x, y):
        return torch.div(x, y, rounding_mode=self.rounding_mode)

@operator_test
class Divide(OperatorTest):
    @dtype_test
    def test_divide_dtype(self, dtype, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                (torch.rand(2, 10) * 100).to(dtype),
                (torch.rand(2, 10) * 100 + 0.1).to(dtype),  # Adding 0.1 to avoid division by zero
            ),
            tester_factory)

    def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 5, 1, 5).abs() + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory)

    def test_divide_f32_bcast_second(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(4, 4, 2, 7),
                torch.randn(2, 7).abs() + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory)

    def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 1, 5).abs() + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory)

    def test_divide_f32_trunc(self, tester_factory: Callable) -> None:
        self._test_op(
            ModelWithRounding(rounding_mode="trunc"),
            (
                torch.randn(3, 4) * 10,
                torch.randn(3, 4).abs() + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory)

    def test_divide_f32_floor(self, tester_factory: Callable) -> None:
        self._test_op(
            ModelWithRounding(rounding_mode="floor"),
            (
                torch.randn(3, 4) * 10,
                torch.randn(3, 4).abs() + 0.1,  # Using abs and adding 0.1 to avoid division by zero
            ),
            tester_factory)
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

from typing import Callable

import torch

from executorch.backends.test.compliance_suite import (
    dtype_test,
    operator_test,
    OperatorTest,
)

class Model(torch.nn.Module):
    def forward(self, x, y):
        return x * y

@operator_test
class Multiply(OperatorTest):
    @dtype_test
    def test_multiply_dtype(self, dtype, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                (torch.rand(2, 10) * 100).to(dtype),
                (torch.rand(2, 10) * 100).to(dtype),
            ),
            tester_factory)

    def test_multiply_f32_bcast_first(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 5, 1, 5),
            ),
            tester_factory)

    def test_multiply_f32_bcast_second(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(4, 4, 2, 7),
                torch.randn(2, 7),
            ),
            tester_factory)

    def test_multiply_f32_bcast_unary(self, tester_factory: Callable) -> None:
        self._test_op(
            Model(),
            (
                torch.randn(5),
                torch.randn(1, 1, 5),
            ),
            tester_factory)
