@@ -395,11 +395,6 @@ def call(self, graph_module: torch.fx.GraphModule): # noqa: C901
                # The node requires nchw inputs
                for input_node in node.all_input_nodes:
                    self.input_to_nchw(graph_module, input_node, node)
            elif node.target == exir_ops.edge.aten._to_copy.default:
                if node.kwargs["memory_format"] == torch.channels_last:
                    self.mark_as_nhwc_node(node)
                else:
                    self.mark_as_nchw_node(node)
            else:
                # The node can have inputs in any format (but all must be the
                # same format)
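
Note (not part of this PR): a minimal sketch of the kind of user code that produces the node the removed branch tagged, assuming standard torch.export behavior where Tensor.to(memory_format=...) is traced as an aten._to_copy-style op carrying a memory_format kwarg:

    import torch

    class MemoryFormatCopy(torch.nn.Module):
        def forward(self, x):
            # Explicit memory-format copy; under export this typically shows up
            # as a _to_copy node with memory_format=torch.channels_last, which
            # the removed branch marked as an NHWC node.
            return x.to(memory_format=torch.channels_last)

    # Illustrative only: inspect the exported graph to see the copy node.
    ep = torch.export.export(MemoryFormatCopy(), (torch.randn(1, 3, 8, 8),))
    print(ep.graph_module.graph)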
backends/xnnpack/partition/config/__init__.py (0 additions, 3 deletions)
@@ -50,7 +50,6 @@
    SquareRootConfig,
    SubConfig,
    TanhConfig,
    ToDimOrderCopyConfig,
    UpsampleBilinear2dConfig,
)
from executorch.backends.xnnpack.partition.config.node_configs import (
@@ -103,8 +102,6 @@
    ReciprocalSquareRootConfig,
    ReLUConfig,
    TanhConfig,
    ToDimOrderCopyConfig,
    # SDPAConfig, TODO: D60553559: preserving SDPA for fairseq fails
    SigmoidConfig,
    SliceCopyConfig,
    SoftmaxConfig,
backends/xnnpack/partition/config/generic_node_configs.py (0 additions, 29 deletions)
@@ -397,35 +397,6 @@ def supported_precision_types(self) -> List[ConfigPrecisionType]:
        return [ConfigPrecisionType.FP32]


class ToDimOrderCopyConfig(GenericNodePartitionerConfig):
    target_name = "_to_dim_order_copy.default"

    def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
        """
        Only support dim order conversion partitioning, not DType conversions
        """
        if not self.check_common_constraints(node, ep):
            return False

        # Get input node and compare dtypes
        input_node = get_input_node(node, 0)
        input_dtype = input_node.meta["val"].dtype
        output_dtype = node.meta["val"].dtype

        # Return False if doing dtype conversion
        if input_dtype != output_dtype:
            why(
                node,
                reason=f"dtype conversion from {input_dtype} to {output_dtype} is not supported",
            )
            return False

        return True

    def supported_precision_types(self) -> List[ConfigPrecisionType]:
        return [ConfigPrecisionType.FP32]
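
Note (illustrative only, not from this PR): the distinction the removed check drew, assuming the standard Tensor.to overloads:

    import torch

    x = torch.randn(1, 3, 4, 4)

    # Dim-order-only copy: input and output dtypes match, so the removed
    # config accepted the node for partitioning.
    y = x.to(memory_format=torch.channels_last)

    # Copy that also converts dtype: the removed check_constraints rejected
    # this case with the "dtype conversion ... is not supported" reason.
    z = x.to(torch.float16, memory_format=torch.channels_last)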


class MeanDimConfig(GenericNodePartitionerConfig):
    target_name = "mean.dim"

backends/xnnpack/test/ops/test_to_copy.py (0 additions, 85 deletions)

This file was deleted.

@@ -173,23 +173,6 @@ def test_fp32_channels_last_tagged_reshape_pass(self):
                .run_method_and_compare_outputs()
            )

    class LinearConvDimSwap(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = torch.nn.Conv2d(3, 3, 3)
            self.linear1 = torch.nn.Linear(4, 3)

        def forward(self, x):
            y = self.linear1(x)
            y = y.to(memory_format=torch.channels_last)
            y = y.to(memory_format=torch.contiguous_format)
            return self.conv1(y)

    LinearConvDimSwapModule = LinearConvDimSwap()

    def test_conv_linear_dim_order_swap_partitioner(self):
        self.run_tester(self.LinearConvDimSwapModule, (torch.randn(1, 3, 6, 4),))

    def test_qs8_channels_last_tagged_reshape_pass(self):
        for module, num_reshape in self.modules.items():
            (