diff --git a/backends/xnnpack/partition/config/__init__.py b/backends/xnnpack/partition/config/__init__.py
index e393f1c9ac8..6dadd4975ce 100644
--- a/backends/xnnpack/partition/config/__init__.py
+++ b/backends/xnnpack/partition/config/__init__.py
@@ -50,7 +50,6 @@
     SquareRootConfig,
     SubConfig,
     TanhConfig,
-    ToDimOrderCopyConfig,
     UpsampleBilinear2dConfig,
 )
 from executorch.backends.xnnpack.partition.config.node_configs import (
@@ -103,7 +102,6 @@
     ReciprocalSquareRootConfig,
     ReLUConfig,
     TanhConfig,
-    ToDimOrderCopyConfig,
     SigmoidConfig,
     SliceCopyConfig,
     SoftmaxConfig,
diff --git a/backends/xnnpack/partition/config/generic_node_configs.py b/backends/xnnpack/partition/config/generic_node_configs.py
index 3faf1b12066..985fcbdd879 100644
--- a/backends/xnnpack/partition/config/generic_node_configs.py
+++ b/backends/xnnpack/partition/config/generic_node_configs.py
@@ -425,35 +425,6 @@ def supported_precision_types(self) -> List[ConfigPrecisionType]:
         return [ConfigPrecisionType.FP32]
 
 
-class ToDimOrderCopyConfig(GenericNodePartitionerConfig):
-    target_name = "_to_dim_order_copy.default"
-
-    def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
-        """
-        Only support dim order conversion partitioning, not DType conversions
-        """
-        if not self.check_common_constraints(node, ep):
-            return False
-
-        # Get input node and compare dtypes
-        input_node = get_input_node(node, 0)
-        input_dtype = input_node.meta["val"].dtype
-        output_dtype = node.meta["val"].dtype
-
-        # Return False if doing dtype conversion
-        if input_dtype != output_dtype:
-            why(
-                node,
-                reason=f"dtype conversion from {input_dtype} to {output_dtype} is not supported",
-            )
-            return False
-
-        return True
-
-    def supported_precision_types(self) -> List[ConfigPrecisionType]:
-        return [ConfigPrecisionType.FP32, ConfigPrecisionType.STATIC_QUANT]
-
-
 class MeanDimConfig(GenericNodePartitionerConfig):
     target_name = "mean.dim"
 
diff --git a/backends/xnnpack/test/ops/test_to_copy.py b/backends/xnnpack/test/ops/test_to_copy.py
deleted file mode 100644
index 1c2980fb717..00000000000
--- a/backends/xnnpack/test/ops/test_to_copy.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-
-import torch
-
-from executorch.backends.xnnpack.test.tester import Tester
-
-
-class TestChannelsLastTaggedReshapePass(unittest.TestCase):
-    def setUp(self):
-        torch._dynamo.reset()
-
-    def run_tester(self, module, inputs):
-        tester = Tester(
-            module.eval(),
-            inputs,
-        )
-        tester.export().to_edge_transform_and_lower().check_not(
-            ["executorch_exir_dialects_edge__ops_aten__to_copy_default"]
-        ).to_executorch().serialize().run_method_and_compare_outputs()
-
-    class ChannelLastBeforeLinear(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.linear = torch.nn.Linear(3, 3)
-
-        def forward(self, x):
-            y = x.to(memory_format=torch.channels_last)
-            return self.linear(y)
-
-    ChannelLastBeforeLinearModule = ChannelLastBeforeLinear()
-
-    def test_channel_last_before_linear(self):
-        self.run_tester(self.ChannelLastBeforeLinearModule, (torch.randn(1, 3, 3, 3),))
-
-    class ContiguousBeforeConv(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv = torch.nn.Conv2d(3, 3, 3)
-
-        def forward(self, x):
-            y = x.to(memory_format=torch.contiguous_format)
-            return self.conv(y)
-
-    ContiguousBeforeConvModule = ContiguousBeforeConv()
-
-    def test_contiguous_before_conv(self):
-        self.run_tester(self.ContiguousBeforeConvModule, (torch.randn(1, 3, 6, 6),))
-
-    class DtypeAndMemoryFormatConversion(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv = torch.nn.Conv2d(3, 3, 3)
-
-        def forward(self, x):
-            y = x.to(torch.float, memory_format=torch.channels_last)
-            return self.conv(y)
-
-    DtypeAndMemoryFormatConversionModule = DtypeAndMemoryFormatConversion()
-
-    def test_dtype_and_memory_format_conversion(self):
-        self.run_tester(
-            self.DtypeAndMemoryFormatConversionModule,
-            (torch.randint(0, 10, (1, 3, 6, 6), dtype=torch.int32),),
-        )
-
-    class DtypeAndMemoryFormatWithLinear(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.linear = torch.nn.Linear(3, 3)
-
-        def forward(self, x):
-            y = x.to(torch.float, memory_format=torch.channels_last)
-            return self.linear(y)
-
-    DtypeAndMemoryFormatWithLinearModule = DtypeAndMemoryFormatWithLinear()
-
-    def test_dtype_and_memory_format_with_linear(self):
-        self.run_tester(
-            self.DtypeAndMemoryFormatWithLinearModule,
-            (torch.randint(0, 10, (1, 3, 3, 3), dtype=torch.int16),),
-        )
-
-    class QuantizedToCopy(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv = torch.nn.Conv2d(3, 3, 3)
-            self.conv2 = torch.nn.Conv2d(3, 3, 3)
-
-        def forward(self, x):
-            y = self.conv(x)
-            y = y.to(memory_format=torch.contiguous_format)
-            return self.conv2(y)
-
-    QuantizedToCopyModule = QuantizedToCopy()
-
-    def test_quantized_to_copy(self):
-        tester = Tester(
-            self.QuantizedToCopyModule.eval(),
-            (torch.randn(1, 3, 9, 9),),
-        )
-
-        tester.quantize().export().to_edge_transform_and_lower().check_not(
-            [
-                "executorch_exir_dialects_edge__ops_aten__to_copy_default",
-                "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default",
-            ]
-        ).to_executorch().serialize().run_method_and_compare_outputs(qtol=0.01)
diff --git a/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py b/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
index a73a0eb0ad1..03515d8d420 100644
--- a/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
+++ b/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
@@ -54,9 +54,7 @@ def run_tester(self, module, inputs):
             module.eval(),
             inputs,
         )
-        tester.export().to_edge_transform_and_lower().check_not(
-            ["executorch_exir_dialects_edge__ops_aten__to_copy_default"]
-        ).to_executorch().serialize().run_method_and_compare_outputs()
+        tester.export().to_edge_transform_and_lower().to_executorch().serialize().run_method_and_compare_outputs()
 
     class LinearConv(torch.nn.Module):
         def __init__(self):
@@ -181,23 +179,6 @@ def test_fp32_channels_last_tagged_reshape_pass(self):
             .run_method_and_compare_outputs()
         )
 
-    class LinearConvDimSwap(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv1 = torch.nn.Conv2d(3, 3, 3)
-            self.linear1 = torch.nn.Linear(4, 3)
-
-        def forward(self, x):
-            y = self.linear1(x)
-            y = y.to(memory_format=torch.channels_last)
-            y = y.to(memory_format=torch.contiguous_format)
-            return self.conv1(y)
-
-    LinearConvDimSwapModule = LinearConvDimSwap()
-
-    def test_conv_linear_dim_order_swap_partitioner(self):
-        self.run_tester(self.LinearConvDimSwapModule, (torch.randn(1, 3, 6, 4),))
-
     def test_qs8_channels_last_tagged_reshape_pass(self):
         for module, num_reshape in self.modules.items():
             (