
Commit ee307af

Update
[ghstack-poisoned]

1 parent a6c9a30 commit ee307af

File tree

11 files changed: +296 -415 lines

.ci/scripts/test_backend_linux.sh

Lines changed: 2 additions & 2 deletions
@@ -54,7 +54,7 @@ fi
 PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true
 
 EXIT_CODE=0
-python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?
+pytest -c /dev/null backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$?
 
 # Generate markdown summary.
-python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE
+python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE
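Note: the change above swaps the bespoke executorch runner for stock pytest. Suites are selected by path, flows by the flow_* markers registered in conftest.py, and reporting moves to the --json-report flags of the pytest-json-report plugin. A minimal local-equivalent sketch via pytest.main; the suite path and flow name are illustrative assumptions, not values from this commit:

# Sketch only: drive the same invocation from Python for local debugging.
# "operators" and "xnnpack" below are assumed example values for $SUITE/$FLOW.
import sys

import pytest

exit_code = pytest.main(
    [
        "-c", "/dev/null",                 # start from an empty pytest config
        "backends/test/suite/operators/",  # $SUITE
        "-m", "flow_xnnpack",              # $FLOW marker from conftest.py
        "--json-report",                   # pytest-json-report plugin
        "--json-report-file", "report.json",
    ]
)
sys.exit(int(exit_code))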

.ci/scripts/test_backend_macos.sh

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ PYTHON_EXECUTABLE=python
 ${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release
 
 EXIT_CODE=0
-${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?
+pytest -c /dev/null backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$?
 
 # Generate markdown summary.
-${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE
+python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE

backends/test/suite/conftest.py

Lines changed: 59 additions & 20 deletions
@@ -1,18 +1,30 @@
+from typing import Any
+
 import pytest
 import torch
 
-from executorch.backends.test.suite.flow import TestFlow, all_flows
+from executorch.backends.test.suite.flow import all_flows
 from executorch.backends.test.suite.reporting import _sum_op_counts
 from executorch.backends.test.suite.runner import run_test
 
-from typing import Any
-
-BACKENDS = ["xnnpack", "coreml", "vulkan", "qnn", "arm"]
 
 def pytest_configure(config):
-    for backend in BACKENDS:
-        config.addinivalue_line("markers", f"backend_{backend}: mark a test as testing the {backend} backend")
-
+    backends = set()
+
+    for flow in all_flows().values():
+        config.addinivalue_line(
+            "markers",
+            f"flow_{flow.name}: mark a test as testing the {flow.name} flow",
+        )
+
+        if flow.backend not in backends:
+            config.addinivalue_line(
+                "markers",
+                f"backend_{flow.backend}: mark a test as testing the {flow.backend} backend",
+            )
+            backends.add(flow.backend)
+
+
 class TestRunner:
     def __init__(self, flow, test_name, test_base_name):
         self._flow = flow
@@ -21,7 +33,13 @@ def __init__(self, flow, test_name, test_base_name):
         self._subtest = 0
         self._results = []
 
-    def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_random_test_inputs=True):
+    def lower_and_run_model(
+        self,
+        model: torch.nn.Module,
+        inputs: Any,
+        generate_random_test_inputs=True,
+        dynamic_shapes=None,
+    ):
         run_summary = run_test(
             model,
             inputs,
@@ -31,13 +49,13 @@ def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_rand
             self._subtest,
             None,
             generate_random_test_inputs=generate_random_test_inputs,
+            dynamic_shapes=dynamic_shapes,
         )
 
         self._subtest += 1
         self._results.append(run_summary)
 
         if not run_summary.result.is_success():
-            raise RuntimeError("Test failure.") from run_summary.error
             if run_summary.result.is_backend_failure():
                 raise RuntimeError("Test failure.") from run_summary.error
             else:
@@ -46,15 +64,27 @@ def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_rand
                     f"Test failed for reasons other than backend failure. Error: {run_summary.error}"
                 )
 
-@pytest.fixture(params=all_flows().values(), ids=str)
+
+@pytest.fixture(
+    params=[
+        pytest.param(
+            f,
+            marks=[
+                getattr(pytest.mark, f"flow_{f.name}"),
+                getattr(pytest.mark, f"backend_{f.backend}"),
+            ],
+        )
+        for f in all_flows().values()
+    ],
+    ids=str,
+)
 def test_runner(request):
     return TestRunner(request.param, request.node.name, request.node.originalname)
 
+
 @pytest.hookimpl(optionalhook=True)
 def pytest_json_runtest_metadata(item, call):
-    metadata = {
-        "subtests": []
-    }
+    metadata = {"subtests": []}
 
     if hasattr(item, "funcargs") and "test_runner" in item.funcargs:
         runner_instance = item.funcargs["test_runner"]
@@ -85,16 +115,26 @@ def pytest_json_runtest_metadata(item, call):
                 else None
             )
             subtest_metadata["Lower Time (s)"] = (
-                f"{record.lower_time.total_seconds():.3f}" if record.lower_time else None
+                f"{record.lower_time.total_seconds():.3f}"
+                if record.lower_time
+                else None
             )
 
             for output_idx, error_stats in enumerate(record.tensor_error_statistics):
-                subtest_metadata[f"Output {output_idx} Error Max"] = f"{error_stats.error_max:.3f}"
-                subtest_metadata[f"Output {output_idx} Error MAE"] = f"{error_stats.error_mae:.3f}"
+                subtest_metadata[f"Output {output_idx} Error Max"] = (
+                    f"{error_stats.error_max:.3f}"
+                )
+                subtest_metadata[f"Output {output_idx} Error MAE"] = (
+                    f"{error_stats.error_mae:.3f}"
+                )
                 subtest_metadata[f"Output {output_idx} SNR"] = f"{error_stats.sqnr:.3f}"
 
-            subtest_metadata["Delegated Nodes"] = _sum_op_counts(record.delegated_op_counts)
-            subtest_metadata["Undelegated Nodes"] = _sum_op_counts(record.undelegated_op_counts)
+            subtest_metadata["Delegated Nodes"] = _sum_op_counts(
+                record.delegated_op_counts
+            )
+            subtest_metadata["Undelegated Nodes"] = _sum_op_counts(
+                record.undelegated_op_counts
+            )
             if record.delegated_op_counts:
                 subtest_metadata["Delegated Ops"] = dict(record.delegated_op_counts)
             if record.undelegated_op_counts:
@@ -104,6 +144,5 @@ def pytest_json_runtest_metadata(item, call):
             )
 
             metadata["subtests"].append(subtest_metadata)
-
-
+
     return metadata
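Note: the fixture above parametrizes every test over all flows and tags each parametrization with its flow_* and backend_* marks, so pytest -m flow_<name> or -m backend_<name> selects tests without per-test annotations (the CI scripts rely on exactly this). A minimal sketch of a consuming test; MulModel and the module itself are hypothetical:

# Hypothetical suite module: the test_runner fixture comes from the
# conftest.py above and is parametrized over every registered flow.
import torch


class MulModel(torch.nn.Module):
    def forward(self, x, y):
        return x * y


def test_mul(test_runner):
    # Lowers through the flow bound to this parametrization and runs it,
    # recording a subtest result that pytest_json_runtest_metadata exports.
    test_runner.lower_and_run_model(MulModel(), (torch.randn(8), torch.randn(8)))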

backends/test/suite/generate_markdown_summary_json.py

Lines changed: 3 additions & 7 deletions
@@ -1,8 +1,5 @@
 import argparse
-import csv
-import functools
 import json
-import sys
 
 from dataclasses import dataclass, field
 
@@ -216,18 +213,17 @@ def build_header(data) -> dict[str, int]:
 
     keys = max(data, key=len)
 
-    header = {
-        k:i for (i,k) in enumerate(keys)
-    }
+    header = {k: i for (i, k) in enumerate(keys)}
 
     for rec in data:
         keys = set(rec.keys())
         for k in keys:
             if k not in header:
                 header[k] = len(header)
-
+
     return header
 
+
 def build_row(rec, header: dict[str, int]) -> list[str]:
     row = [""] * len(header)
     for k, v in rec.items():
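From the hunk above, build_header seeds the column order from the record with the most keys, then appends any keys seen only in other records; build_row (assuming it fills row[header[k]] for each item, which the visible lines suggest) maps every record into that fixed width. A small worked example with invented data:

# Invented data; build_header/build_row are the functions shown above.
from executorch.backends.test.suite.generate_markdown_summary_json import (
    build_header,
    build_row,
)

data = [
    {"Test": "test_mul", "Result": "Pass"},
    {"Test": "test_add", "Result": "Fail", "Error": "AssertionError"},
]

header = build_header(data)
# max(data, key=len) picks the second record, so:
# header == {"Test": 0, "Result": 1, "Error": 2}

rows = [build_row(rec, header) for rec in data]
# Missing cells stay empty strings:
# [["test_mul", "Pass", ""], ["test_add", "Fail", "AssertionError"]]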

backends/test/suite/models/__init__.py

Lines changed: 0 additions & 68 deletions
@@ -5,71 +5,3 @@
 # LICENSE file in the root directory of this source tree.
 
 # pyre-unsafe
-
-import itertools
-import os
-import unittest
-from typing import Any, Callable
-
-import torch
-from executorch.backends.test.suite import get_test_flows
-from executorch.backends.test.suite.context import get_active_test_context, TestContext
-from executorch.backends.test.suite.flow import TestFlow
-from executorch.backends.test.suite.reporting import log_test_summary
-from executorch.backends.test.suite.runner import run_test
-
-
-DTYPES: list[torch.dtype] = [
-    torch.float16,
-    torch.float32,
-]
-
-
-class ModelTest(unittest.TestCase):
-    pass
-
-
-class TestCaseShim:
-    def __init__(self, test_runner):
-        self._test_runner = test_runner
-
-    def _test_op(self, model, args, flow, generate_random_test_inputs=True):
-        self._test_runner.lower_and_run_model(model, args)
-
-
-def wrap_test(original_func, test_type):
-    def wrapped_func(test_runner):
-        shim = TestCaseShim(test_runner)
-        original_func(shim, test_runner._flow)
-
-    return wrapped_func
-
-
-def model_test_cls(cls):
-    parent_module = sys.modules[cls.__module__]
-
-    for func_name in dir(cls):
-        if func_name.startswith("test"):
-            original_func = getattr(cls, func_name)
-            test_type = getattr(original_func, "test_type", TestType.STANDARD)
-            wrapped_func = wrap_test(original_func, test_type)
-            setattr(parent_module, func_name, wrapped_func)
-
-    return None
-
-
-def model_test_params(
-    supports_dynamic_shapes: bool = True,
-    dtypes: list[torch.dtype] | None = None,
-) -> Callable:
-    """Optional parameter decorator for model tests. Specifies test parameters. Only valid with a class decorated by model_test_cls."""
-
-    def inner_decorator(func: Callable) -> Callable:
-        func.supports_dynamic_shapes = supports_dynamic_shapes  # type: ignore
-
-        if dtypes is not None:
-            func.dtypes = dtypes  # type: ignore
-
-        return func
-
-    return inner_decorator
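The deleted shims (model_test_cls, model_test_params, DTYPES) are superseded by plain pytest parametrization, as the torchaudio tests below show. A hedged sketch of equivalent dtype coverage in the new style; DummyModel is invented, and the dtype list mirrors the removed DTYPES constant:

# Sketch: the old model_test_params(dtypes=...) decorator becomes an
# explicit parametrize over the same dtype list. DummyModel is hypothetical.
import pytest
import torch


class DummyModel(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_dummy(test_runner, dtype: torch.dtype):
    model = DummyModel().to(dtype)
    test_runner.lower_and_run_model(model, (torch.randn(1, 16, dtype=dtype),))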

backends/test/suite/models/test_torchaudio.py

Lines changed: 7 additions & 12 deletions
@@ -14,7 +14,6 @@
 import torchaudio
 
 from executorch.backends.test.suite import dtype_to_str
-from executorch.backends.test.suite.flow import TestFlow
 from torch.export import Dim
 
 #
@@ -45,9 +44,7 @@ def forward(
 
 
 @pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str)
-@pytest.mark.parametrize(
-    "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"]
-)
+@pytest.mark.parametrize("use_dynamic_shapes", [False], ids=["static_shapes"])
 def test_conformer(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool):
     inner_model = torchaudio.models.Conformer(
         input_dim=80,
@@ -72,7 +69,7 @@ def test_conformer(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool):
 @pytest.mark.parametrize(
     "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"]
 )
-def test_wav2letter(flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool):
+def test_wav2letter(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool):
     model = torchaudio.models.Wav2Letter().to(dtype)
     inputs = (torch.randn(1, 1, 1024, dtype=dtype),)
     dynamic_shapes = (
@@ -85,13 +82,11 @@ def test_wav2letter(flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool
         else None
     )
 
-    test_runner.lower_and_run_model(model, inputs)
+    test_runner.lower_and_run_model(model, inputs, dynamic_shapes=dynamic_shapes)
 
 
 @pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str)
-@pytest.mark.parametrize(
-    "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"]
-)
+@pytest.mark.parametrize("use_dynamic_shapes", [False], ids=["static_shapes"])
 @unittest.skip("This model times out on all backends.")
 def test_wavernn(
     test_runner,
@@ -108,8 +103,8 @@ def test_wavernn(
 
     # See https://docs.pytorch.org/audio/stable/generated/torchaudio.models.WaveRNN.html#forward
     inputs = (
-        torch.randn(1, 1, (64 - 5 + 1) * 200),  # waveform
-        torch.randn(1, 1, 128, 64),  # specgram
-    ).to(dtype)
+        torch.randn(1, 1, (64 - 5 + 1) * 200).to(dtype),  # waveform
+        torch.randn(1, 1, 128, 64).to(dtype),  # specgram
+    )
 
     test_runner.lower_and_run_model(model, inputs)
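The wav2letter fix above now forwards the computed dynamic_shapes instead of silently dropping it, using the dynamic_shapes parameter added to lower_and_run_model in conftest.py. A minimal sketch of the torch.export-style spec being passed; the dimension name and bounds are assumptions, not values from this commit:

# Sketch: a dynamic_shapes spec in torch.export format for a single
# (batch, channel, time) input; the name and bounds are invented.
from torch.export import Dim

dynamic_shapes = ({2: Dim("time", min=512, max=4096)},)  # one entry per input
# test_runner.lower_and_run_model(model, inputs, dynamic_shapes=dynamic_shapes)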
