diff --git a/backends/test/suite/operators/__init__.py b/backends/test/suite/operators/__init__.py
index 79684fd43ec..ec335562b39 100644
--- a/backends/test/suite/operators/__init__.py
+++ b/backends/test/suite/operators/__init__.py
@@ -133,7 +133,9 @@ def _create_test_for_backend(
 
 
 class OperatorTest(unittest.TestCase):
-    def _test_op(self, model, inputs, flow: TestFlow):
+    def _test_op(
+        self, model, inputs, flow: TestFlow, generate_random_test_inputs: bool = True
+    ):
         context = get_active_test_context()
 
         # This should be set in the wrapped test. See _make_wrapped_test above.
@@ -145,6 +147,7 @@ def _test_op(self, model, inputs, flow: TestFlow):
             flow,
             context.test_name,
             context.params,
+            generate_random_test_inputs=generate_random_test_inputs,
         )
 
         log_test_summary(run_summary)
diff --git a/backends/test/suite/operators/test_embedding.py b/backends/test/suite/operators/test_embedding.py
new file mode 100644
index 00000000000..07e09952db8
--- /dev/null
+++ b/backends/test/suite/operators/test_embedding.py
@@ -0,0 +1,89 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import torch
+from executorch.backends.test.suite.flow import TestFlow
+
+from executorch.backends.test.suite.operators import (
+    dtype_test,
+    operator_test,
+    OperatorTest,
+)
+
+
+class Model(torch.nn.Module):
+    def __init__(
+        self,
+        num_embeddings=100,
+        embedding_dim=50,
+    ):
+        super().__init__()
+        self.embedding = torch.nn.Embedding(
+            num_embeddings=num_embeddings,
+            embedding_dim=embedding_dim,
+        )
+
+    def forward(self, x):
+        return self.embedding(x)
+
+
+@operator_test
+class Embedding(OperatorTest):
+    # Note that generate_random_test_inputs is used to avoid the tester
+    # generating random inputs that are out of range of the embedding size.
+    # The tester's random input generation is not smart enough to know that
+    # the index inputs must be within a certain range.
+
+    @dtype_test
+    def test_embedding_dtype(self, flow: TestFlow, dtype) -> None:
+        self._test_op(
+            Model().to(dtype),
+            (torch.randint(0, 10, (2, 8), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+    def test_embedding_sizes(self, flow: TestFlow) -> None:
+        self._test_op(
+            Model(num_embeddings=5, embedding_dim=3),
+            (torch.randint(0, 5, (2, 8), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(num_embeddings=100, embedding_dim=10),
+            (torch.randint(0, 100, (2, 8), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(num_embeddings=1000, embedding_dim=50),
+            (torch.randint(0, 1000, (2, 4), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+    def test_embedding_batch_dim(self, flow: TestFlow) -> None:
+        self._test_op(
+            Model(),
+            (torch.randint(0, 100, (5,), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(),
+            (torch.randint(0, 100, (2, 8), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(),
+            (torch.randint(0, 100, (2, 3, 4), dtype=torch.long),),
+            flow,
+            generate_random_test_inputs=False,
+        )
diff --git a/backends/test/suite/operators/test_embedding_bag.py b/backends/test/suite/operators/test_embedding_bag.py
new file mode 100644
index 00000000000..2659bdd9b0b
--- /dev/null
+++ b/backends/test/suite/operators/test_embedding_bag.py
@@ -0,0 +1,118 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import torch
+from executorch.backends.test.suite.flow import TestFlow
+
+from executorch.backends.test.suite.operators import (
+    dtype_test,
+    operator_test,
+    OperatorTest,
+)
+
+
+class Model(torch.nn.Module):
+    def __init__(
+        self,
+        num_embeddings=10,
+        embedding_dim=5,
+        mode="mean",
+        include_last_offset: bool = False,
+    ):
+        super().__init__()
+        self.embedding_bag = torch.nn.EmbeddingBag(
+            num_embeddings=num_embeddings,
+            embedding_dim=embedding_dim,
+            mode=mode,
+            include_last_offset=include_last_offset,
+        )
+
+    def forward(self, x, offsets=None):
+        return self.embedding_bag(x, offsets)
+
+
+@operator_test
+class EmbeddingBag(OperatorTest):
+    # Note that generate_random_test_inputs is used to avoid the tester
+    # generating random inputs that are out of range of the embedding size.
+    # The tester's random input generation is not smart enough to know that
+    # the index inputs must be within a certain range.
+
+    @dtype_test
+    def test_embedding_bag_dtype(self, flow: TestFlow, dtype) -> None:
+        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
+        offsets = torch.tensor([0, 4], dtype=torch.long)
+        self._test_op(
+            Model().to(dtype),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+    def test_embedding_bag_sizes(self, flow: TestFlow) -> None:
+        indices = torch.tensor([1, 2, 3, 1], dtype=torch.long)
+        offsets = torch.tensor([0, 2], dtype=torch.long)
+
+        self._test_op(
+            Model(num_embeddings=5, embedding_dim=3),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+        indices = torch.tensor([5, 20, 10, 43, 7], dtype=torch.long)
+        offsets = torch.tensor([0, 2, 4], dtype=torch.long)
+        self._test_op(
+            Model(num_embeddings=50, embedding_dim=10),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+        indices = torch.tensor([100, 200, 300, 400], dtype=torch.long)
+        offsets = torch.tensor([0, 2], dtype=torch.long)
+        self._test_op(
+            Model(num_embeddings=500, embedding_dim=20),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+    def test_embedding_bag_modes(self, flow: TestFlow) -> None:
+        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
+        offsets = torch.tensor([0, 4], dtype=torch.long)
+
+        self._test_op(
+            Model(mode="sum"),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(mode="mean"),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+        self._test_op(
+            Model(mode="max"),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
+
+    def test_embedding_bag_include_last_offset(self, flow: TestFlow) -> None:
+        indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
+        offsets = torch.tensor([0, 4], dtype=torch.long)
+
+        self._test_op(
+            Model(include_last_offset=True),
+            (indices, offsets),
+            flow,
+            generate_random_test_inputs=False,
+        )
diff --git a/backends/test/suite/runner.py b/backends/test/suite/runner.py
index 3fe9084548c..dd6e3586628 100644
--- a/backends/test/suite/runner.py
+++ b/backends/test/suite/runner.py
@@ -33,6 +33,7 @@ def run_test(  # noqa: C901
     test_name: str,
     params: dict | None,
     dynamic_shapes: Any | None = None,
+    generate_random_test_inputs: bool = True,
 ) -> TestCaseSummary:
     """
     Top-level test run function for a model, input set, and tester. Handles test execution
@@ -102,7 +103,9 @@ def build_result(
     # the cause of a failure in run_method_and_compare_outputs. We can look for
     # AssertionErrors to catch output mismatches, but this might catch more than that.
     try:
-        tester.run_method_and_compare_outputs()
+        tester.run_method_and_compare_outputs(
+            inputs=None if generate_random_test_inputs else inputs
+        )
     except AssertionError as e:
         return build_result(TestResult.OUTPUT_MISMATCH_FAIL, e)
     except Exception as e: