Skip to content

[Backend Tester] Add portable test flow #13250

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 105 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 95 commits
Commits
Show all changes
105 commits
Select commit Hold shift + click to select a range
f120e70
Update
GregoryComer Jul 18, 2025
0fb85e6
Update
GregoryComer Jul 18, 2025
4d8d844
Update
GregoryComer Jul 19, 2025
dc12b40
Update
GregoryComer Jul 21, 2025
ead0616
Update
GregoryComer Jul 22, 2025
0f13676
Update
GregoryComer Jul 22, 2025
b0b01f2
Update
GregoryComer Jul 22, 2025
8b9c9ef
Update
GregoryComer Jul 22, 2025
06bf03a
Update
GregoryComer Jul 22, 2025
2f8f49b
Update
GregoryComer Jul 22, 2025
8ca7766
Update
GregoryComer Jul 22, 2025
bffb95f
Update
GregoryComer Jul 22, 2025
d21492b
Update
GregoryComer Jul 22, 2025
e2c4ea5
Update
GregoryComer Jul 22, 2025
8230848
Update
GregoryComer Jul 22, 2025
2a1f564
Update
GregoryComer Jul 22, 2025
b35e7b1
Update
GregoryComer Jul 22, 2025
5c4c6ce
Update
GregoryComer Jul 22, 2025
9397803
Update
GregoryComer Jul 22, 2025
9dfeb5a
Update
GregoryComer Jul 22, 2025
ff5c4a5
Update
GregoryComer Jul 22, 2025
42a5de5
Update
GregoryComer Jul 22, 2025
402d8f5
Update
GregoryComer Jul 22, 2025
34d3ab3
Update
GregoryComer Jul 22, 2025
1105e04
Update
GregoryComer Jul 22, 2025
482bd21
Update
GregoryComer Jul 22, 2025
ea548b7
Update
GregoryComer Jul 23, 2025
4108f54
Update
GregoryComer Jul 23, 2025
7ef236b
Update
GregoryComer Jul 23, 2025
4a58c9d
Update
GregoryComer Jul 23, 2025
3b866b4
Update
GregoryComer Jul 23, 2025
5ba25cb
Update
GregoryComer Jul 23, 2025
19760fc
Update
GregoryComer Jul 23, 2025
81dfb07
Update
GregoryComer Jul 23, 2025
4d50265
Update
GregoryComer Jul 23, 2025
5f66043
Update
GregoryComer Jul 23, 2025
24e919d
Update
GregoryComer Jul 23, 2025
523cc20
Update
GregoryComer Jul 23, 2025
74c95fe
Update
GregoryComer Jul 23, 2025
5d437b1
Update
GregoryComer Jul 23, 2025
89757ce
Update
GregoryComer Jul 23, 2025
423f79a
Update
GregoryComer Jul 23, 2025
69f7f9c
Update
GregoryComer Jul 23, 2025
c0f6224
Update
GregoryComer Jul 23, 2025
e2ea2a3
Update
GregoryComer Jul 23, 2025
7a2fab5
Update
GregoryComer Jul 23, 2025
033c231
Update
GregoryComer Jul 23, 2025
a9ed762
Update
GregoryComer Jul 23, 2025
64b174a
Update
GregoryComer Jul 23, 2025
3976629
Update
GregoryComer Jul 23, 2025
27cd171
Update
GregoryComer Jul 23, 2025
7bdd3e5
Update
GregoryComer Jul 23, 2025
b1254cd
Update
GregoryComer Jul 23, 2025
f2e2289
Update
GregoryComer Jul 23, 2025
cdd15c1
Update
GregoryComer Jul 23, 2025
e2df06e
Update
GregoryComer Jul 23, 2025
4461bd8
Update
GregoryComer Jul 23, 2025
7e97fd0
Update
GregoryComer Jul 23, 2025
bcb697c
Update
GregoryComer Jul 23, 2025
11a5a02
Update
GregoryComer Jul 24, 2025
244b146
Update
GregoryComer Jul 24, 2025
de21ac2
Update
GregoryComer Jul 24, 2025
fd26fc7
Update
GregoryComer Jul 24, 2025
4ae840d
Update
GregoryComer Jul 24, 2025
710ea49
Update
GregoryComer Jul 24, 2025
32f54b0
Update
GregoryComer Jul 24, 2025
a27d18c
Update
GregoryComer Jul 24, 2025
2eb59fc
Update
GregoryComer Jul 24, 2025
5cc4941
Update
GregoryComer Jul 24, 2025
ef7af5c
Update
GregoryComer Jul 24, 2025
18e89c1
Update
GregoryComer Jul 24, 2025
4719c90
Update
GregoryComer Jul 25, 2025
dd09555
Update
GregoryComer Aug 8, 2025
f1db3a0
Update
GregoryComer Aug 8, 2025
e0700b2
Update
GregoryComer Aug 8, 2025
f260b50
Update
GregoryComer Aug 8, 2025
d62ee60
Update
GregoryComer Aug 8, 2025
b2ab3a5
Update
GregoryComer Aug 8, 2025
c23c3e9
Update
GregoryComer Aug 8, 2025
f261355
Update
GregoryComer Aug 11, 2025
c3a24f9
Update
GregoryComer Aug 11, 2025
1697cbc
Update
GregoryComer Aug 11, 2025
b94b45e
Update
GregoryComer Aug 11, 2025
5740f0a
Update
GregoryComer Aug 11, 2025
ed6840d
Update
GregoryComer Aug 11, 2025
f2a7e1f
Update
GregoryComer Aug 11, 2025
bd79ef2
Update
GregoryComer Aug 12, 2025
8932c29
Update
GregoryComer Aug 12, 2025
ea2549c
Update
GregoryComer Aug 12, 2025
f4b0dc2
Update
GregoryComer Aug 12, 2025
7e1a002
Update
GregoryComer Aug 12, 2025
a628d29
Update
GregoryComer Aug 12, 2025
3615d89
Update
GregoryComer Aug 12, 2025
e994bc1
Update
GregoryComer Aug 12, 2025
0aba8e1
Update
GregoryComer Aug 12, 2025
1d34f49
Update
GregoryComer Aug 12, 2025
933fba2
Update
GregoryComer Aug 12, 2025
d468ae4
Update
GregoryComer Aug 12, 2025
acbd480
Update
GregoryComer Aug 12, 2025
1897d4e
Update
GregoryComer Aug 12, 2025
f65d80f
Update
GregoryComer Aug 12, 2025
0d1f097
Update
GregoryComer Aug 12, 2025
871312a
Update
GregoryComer Aug 12, 2025
53990fe
Update
GregoryComer Aug 12, 2025
31bc137
Update
GregoryComer Aug 12, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion backends/qualcomm/tests/tester.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,9 @@ def __init__(
default_partitioner_cls=QnnPartitioner,
)

def run(self, artifact: ExportedProgram, inputs=None) -> None:
def run(
self, artifact: ExportedProgram, inputs=None, generate_etrecord: bool = False
) -> None:
ep = QnnPassManager().transform_for_export_pipeline(artifact)
transform_passes = QnnPassManager().get_to_edge_transform_passes(ep)

Expand All @@ -61,6 +63,7 @@ def run(self, artifact: ExportedProgram, inputs=None) -> None:
transform_passes=transform_passes,
partitioner=self.partitioners,
compile_config=self.edge_compile_conf,
generate_etrecord=generate_etrecord,
)


Expand Down
99 changes: 99 additions & 0 deletions backends/test/harness/error_statistics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
from dataclasses import dataclass

import torch
from torch.ao.ns.fx.utils import compute_sqnr


@dataclass
class TensorStatistics:
    """Contains summary statistics for a tensor."""

    shape: torch.Size
    """ The shape of the tensor. """

    numel: int
    """ The number of elements in the tensor. """

    median: float
    """ The median of the tensor. """

    mean: float
    """ The mean of the tensor. """

    max: torch.types.Number
    """ The maximum element of the tensor. """

    min: torch.types.Number
    """ The minimum element of the tensor. """

    @classmethod
    def from_tensor(cls, tensor: torch.Tensor) -> "TensorStatistics":
        """
        Creates a TensorStatistics object from a tensor.

        The median and mean are computed on a float64 copy of the tensor:
        torch.quantile (and torch.Tensor.mean) require a floating-point
        input and would otherwise raise for integer tensors when this
        method is called directly, rather than via
        ErrorStatistics.from_tensors (which pre-casts its inputs).
        """
        flattened = torch.flatten(tensor)
        # Single cast for the statistics that require floating point;
        # max/min are taken from the original values.
        flattened_f64 = flattened.to(torch.float64)
        return cls(
            shape=tensor.shape,
            numel=tensor.numel(),
            median=torch.quantile(flattened_f64, q=0.5).item(),
            mean=flattened_f64.mean().item(),
            max=flattened.max().item(),
            min=flattened.min().item(),
        )


@dataclass
class ErrorStatistics:
    """Summary of the elementwise difference between two tensors."""

    reference_stats: TensorStatistics
    """ Statistics for the reference tensor. """

    actual_stats: TensorStatistics
    """ Statistics for the actual tensor. """

    error_l2_norm: float | None
    """ The L2 norm of the error between the actual and reference tensor. """

    error_mae: float | None
    """ The mean absolute error between the actual and reference tensor. """

    error_max: float | None
    """ The maximum absolute elementwise error between the actual and reference tensor. """

    error_msd: float | None
    """ The mean signed deviation between the actual and reference tensor. """

    sqnr: float | None
    """ The signal-to-quantization-noise ratio between the actual and reference tensor. """

    @classmethod
    def from_tensors(
        cls, actual: torch.Tensor, reference: torch.Tensor
    ) -> "ErrorStatistics":
        """Build an ErrorStatistics record comparing ``actual`` to ``reference``."""
        # Work in float64 so the error metrics are not limited by the
        # precision of the incoming tensors.
        actual = actual.to(torch.float64)
        reference = reference.to(torch.float64)

        ref_summary = TensorStatistics.from_tensor(reference)
        act_summary = TensorStatistics.from_tensor(actual)

        # Elementwise error metrics are only defined for matching shapes;
        # report per-tensor summaries and leave the error fields unset.
        if actual.shape != reference.shape:
            return cls(
                reference_stats=ref_summary,
                actual_stats=act_summary,
                error_l2_norm=None,
                error_mae=None,
                error_max=None,
                error_msd=None,
                sqnr=None,
            )

        delta = torch.flatten(actual - reference)
        abs_delta = torch.abs(delta)

        return cls(
            reference_stats=ref_summary,
            actual_stats=act_summary,
            error_l2_norm=torch.linalg.norm(delta).item(),
            error_mae=abs_delta.mean().item(),
            error_max=abs_delta.max().item(),
            error_msd=delta.mean().item(),
            # Torch sqnr implementation requires float32 due to decorator logic
            sqnr=compute_sqnr(actual.to(torch.float), reference.to(torch.float)).item(),
        )
14 changes: 11 additions & 3 deletions backends/test/harness/stages/to_edge_transform_and_lower.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,28 +7,36 @@
to_edge_transform_and_lower,
)
from executorch.exir.backend.partitioner import Partitioner

from torch.export import ExportedProgram


class ToEdgeTransformAndLower(Stage):
def __init__(
self,
default_partitioner_cls: Type,
default_partitioner_cls: Type | None = None,
partitioners: Optional[List[Partitioner]] = None,
edge_compile_config: Optional[EdgeCompileConfig] = None,
):
self.partitioners = partitioners or [default_partitioner_cls()]
self.partitioners = (
partitioners or [default_partitioner_cls()]
if default_partitioner_cls is not None
else []
)
self.edge_compile_conf = edge_compile_config or EdgeCompileConfig()
self.edge_dialect_program = None

def stage_type(self) -> StageType:
return StageType.TO_EDGE_TRANSFORM_AND_LOWER

def run(self, artifact: ExportedProgram, inputs=None) -> None:
def run(
self, artifact: ExportedProgram, inputs=None, generate_etrecord: bool = False
) -> None:
self.edge_dialect_program = to_edge_transform_and_lower(
artifact,
compile_config=self.edge_compile_conf,
partitioner=self.partitioners,
generate_etrecord=generate_etrecord,
)

@property
Expand Down
48 changes: 33 additions & 15 deletions backends/test/harness/tester.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

import torch

from executorch.backends.test.harness.error_statistics import ErrorStatistics
from executorch.backends.test.harness.stages import (
Export,
Partition,
Expand Down Expand Up @@ -33,12 +34,12 @@ def __init__(
self,
module: torch.nn.Module,
example_inputs: Tuple[torch.Tensor],
stage_classes: Dict[StageType, Callable],
stage_classes: Dict[StageType, Callable] | None = None,
dynamic_shapes: Optional[Tuple[Any]] = None,
):
module.eval()

self.stage_classes = stage_classes
self.stage_classes = stage_classes or Tester.default_stage_classes()
self.original_module = module
self.example_inputs = example_inputs
self.dynamic_shapes = dynamic_shapes
Expand Down Expand Up @@ -182,10 +183,10 @@ def _post(self, stage):
assert stage_type in self.stages
self.stages[stage_type] = stage

def _run_stage(self, stage_instance, inputs=None):
def _run_stage(self, stage_instance, inputs=None, *args, **kwargs):
assert isinstance(stage_instance, Stage)
prev_stage_artifact = self._pre(stage_instance)
stage_instance.run(prev_stage_artifact, inputs=inputs)
stage_instance.run(prev_stage_artifact, inputs=inputs, *args, **kwargs) # noqa
self._post(stage_instance)
return self

Expand All @@ -212,11 +213,14 @@ def to_edge(self, to_edge_stage: Optional[ToEdge] = None):
return res

def to_edge_transform_and_lower(
self, to_edge_and_transform_stage: Optional[ToEdgeTransformAndLower] = None
self,
to_edge_and_transform_stage: Optional[ToEdgeTransformAndLower] = None,
generate_etrecord: bool = False,
):
return self._run_stage(
to_edge_and_transform_stage
or self._get_default_stage(StageType.TO_EDGE_TRANSFORM_AND_LOWER)
or self._get_default_stage(StageType.TO_EDGE_TRANSFORM_AND_LOWER),
generate_etrecord=generate_etrecord,
)

def run_passes(self, run_passes_stage: Optional[RunPasses] = None):
Expand Down Expand Up @@ -302,20 +306,15 @@ def run_method_and_compare_outputs(
atol=1e-03,
rtol=1e-03,
qtol=0,
statistics_callback: Callable[[ErrorStatistics], None] | None = None,
):
number_of_runs = 1 if inputs is not None else num_runs
reference_stage = self.stages[StageType.EXPORT]

stage = stage or self.cur

print(f"Comparing Stage {stage} with Stage {reference_stage}")
for run_iteration in range(number_of_runs):
for _ in range(number_of_runs):
inputs_to_run = inputs if inputs else next(self.generate_random_inputs())
input_shapes = [
generated_input.shape if hasattr(generated_input, "shape") else None
for generated_input in inputs_to_run
]
print(f"Run {run_iteration} with input shapes: {input_shapes}")

# Reference output (and quantization scale)
(
Expand All @@ -328,13 +327,25 @@ def run_method_and_compare_outputs(
# Output from running artifact at stage
stage_output = self.stages[stage].run_artifact(inputs_to_run)
self._compare_outputs(
reference_output, stage_output, quantization_scale, atol, rtol, qtol
reference_output,
stage_output,
quantization_scale,
atol,
rtol,
qtol,
statistics_callback,
)

return self

@staticmethod
def _assert_outputs_equal(model_output, ref_output, atol=1e-03, rtol=1e-03):
def _assert_outputs_equal(
model_output,
ref_output,
atol=1e-03,
rtol=1e-03,
statistics_callback: Callable[[ErrorStatistics], None] | None = None,
):
"""
Helper testing function that asserts that the model output and the reference output
are equal with some tolerance. Due to numerical differences between eager mode and
Expand All @@ -349,6 +360,11 @@ def _assert_outputs_equal(model_output, ref_output, atol=1e-03, rtol=1e-03):
for i in range(len(model_output)):
model = model_output[i]
ref = ref_output[i]

error_stats = ErrorStatistics.from_tensors(model, ref)
if statistics_callback is not None:
statistics_callback(error_stats)

assert (
ref.shape == model.shape
), f"Output {i} shape {model.shape} does not match reference output shape {ref.shape}"
Expand Down Expand Up @@ -386,6 +402,7 @@ def _compare_outputs(
atol=1e-03,
rtol=1e-03,
qtol=0,
statistics_callback: Callable[[ErrorStatistics], None] | None = None,
):
"""
Compares the original of the original nn module with the output of the generated artifact.
Expand All @@ -408,6 +425,7 @@ def _compare_outputs(
reference_output,
atol=atol,
rtol=rtol,
statistics_callback=statistics_callback,
)

@staticmethod
Expand Down
65 changes: 65 additions & 0 deletions backends/test/harness/tests/test_error_statistics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import unittest

import torch
from executorch.backends.test.harness.error_statistics import ErrorStatistics


class ErrorStatisticsTests(unittest.TestCase):
    """Unit tests for ErrorStatistics.from_tensors."""

    def test_error_stats_simple(self):
        # Same-shape inputs: every error metric should be populated.
        actual = torch.tensor([1, 2, 3, 4])
        reference = torch.tensor([2, 2, 2, 5])

        stats = ErrorStatistics.from_tensors(actual, reference)

        # Summary statistics of the actual tensor.
        act = stats.actual_stats
        self.assertEqual(act.shape, torch.Size([4]))
        self.assertEqual(act.numel, 4)
        self.assertEqual(act.median, 2.5)
        self.assertEqual(act.mean, 2.5)
        self.assertEqual(act.max, 4)
        self.assertEqual(act.min, 1)

        # Summary statistics of the reference tensor.
        ref = stats.reference_stats
        self.assertEqual(ref.shape, torch.Size([4]))
        self.assertEqual(ref.numel, 4)
        self.assertEqual(ref.median, 2.0)
        self.assertEqual(ref.mean, 2.75)
        self.assertEqual(ref.max, 5)
        self.assertEqual(ref.min, 2)

        # Derived error metrics.
        self.assertAlmostEqual(stats.error_l2_norm, 1.732, places=3)
        self.assertEqual(stats.error_mae, 0.75)
        self.assertEqual(stats.error_max, 1.0)
        self.assertEqual(stats.error_msd, -0.25)
        self.assertAlmostEqual(stats.sqnr, 10.0, places=3)

    def test_error_stats_different_shapes(self):
        # Mismatched shapes: per-tensor summaries are still produced,
        # but every error metric must be None.
        actual = torch.tensor([1, 2, 3, 4])
        reference = torch.tensor([[2, 3], [4, 5]])

        stats = ErrorStatistics.from_tensors(actual, reference)

        act = stats.actual_stats
        self.assertEqual(act.shape, torch.Size([4]))
        self.assertEqual(act.numel, 4)
        self.assertEqual(act.median, 2.5)
        self.assertEqual(act.mean, 2.5)
        self.assertEqual(act.max, 4)
        self.assertEqual(act.min, 1)

        ref = stats.reference_stats
        self.assertEqual(ref.shape, torch.Size([2, 2]))
        self.assertEqual(ref.numel, 4)
        self.assertEqual(ref.median, 3.5)
        self.assertEqual(ref.mean, 3.5)
        self.assertEqual(ref.max, 5)
        self.assertEqual(ref.min, 2)

        for metric in (
            stats.error_l2_norm,
            stats.error_mae,
            stats.error_max,
            stats.error_msd,
            stats.sqnr,
        ):
            self.assertIsNone(metric)
13 changes: 11 additions & 2 deletions backends/test/suite/flow.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import logging

from dataclasses import dataclass, field
from dataclasses import dataclass
from typing import Callable

from executorch.backends.test.harness import Tester
Expand All @@ -26,16 +26,25 @@ class TestFlow:
tester_factory: Callable[..., Tester]
""" A factory function that returns a Tester instance for this lowering flow. """

quantize: bool = field(default=False)
quantize: bool = False
""" Whether the tester should run the quantize stage on the model. """

quantize_stage_factory: Callable[..., Quantize] | None = None
""" A factory function which instantiates a Quantize stage. Can be None to use the tester's default. """

is_delegated: bool = True
""" Indicates whether the flow is expected to generate CALL_DELEGATE nodes. """


def all_flows() -> dict[str, TestFlow]:
flows = []

from executorch.backends.test.suite.flows.portable import PORTABLE_TEST_FLOW

flows += [
PORTABLE_TEST_FLOW,
]

try:
from executorch.backends.test.suite.flows.xnnpack import (
XNNPACK_STATIC_INT8_PER_CHANNEL_TEST_FLOW,
Expand Down
Loading
Loading