
Commit 3f81e81

Revert "[EXIR] Register _clone_dim_order op and map aten.clone" (#13723)

Reverts #12971. This backs out the registration of the dim_order_ops._clone_dim_order op and the mapping of aten.clone onto it, so aten.clone is once again handled directly in EXIR and in the CoreML and Arm backends.

1 parent 8210d02 · commit 3f81e81

File tree: 11 files changed, +5 −231 lines

backends/apple/coreml/compiler/torch_ops.py

Lines changed: 0 additions & 23 deletions
```diff
@@ -15,7 +15,6 @@
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
     _get_kwinputs,
-    noop,
     NUM_TO_NUMPY_DTYPE,
     NUM_TO_TORCH_DTYPE,
     split,
@@ -92,28 +91,6 @@ def _to_dim_order_copy(context, node):
     to(context, node)
 
 
-@register_torch_op(
-    torch_alias=[
-        "dim_order_ops::_clone_dim_order",
-        "dim_order_ops._clone_dim_order",
-    ],
-    override=False,
-)
-def _clone_dim_order(context, node):
-    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
-    node.kwinputs.pop("dim_order")
-
-    # In CoreML, dim_order.val will be a ndarray, so we convert it to a list to check memory format.
-    dim_order = [int(d) for d in dim_order.val]
-    memory_format = get_memory_format(dim_order)
-    assert (
-        memory_format == _torch.contiguous_format
-    ), "Only contiguous memory format is supported in CoreML"
-
-    # Since CoreML only supports contiguous format, no dim_order preservation is needed. Treat this as a no-op clone.
-    noop(context, node)
-
-
 # https://github.com/apple/coremltools/pull/2558
 @register_torch_op(
     torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],
```

backends/apple/coreml/test/test_torch_ops.py

Lines changed: 0 additions & 23 deletions
```diff
@@ -213,28 +213,6 @@ def test_dequantize_codebook_embedding(self):
         et_prog = delegated_program.to_executorch()
         self._compare_outputs(et_prog, model, example_inputs)
 
-    def test__clone_dim_order_contiguous(self):
-        class Model(torch.nn.Module):
-            def forward(self, x):
-                return torch.ops.dim_order_ops._clone_dim_order(
-                    x, dim_order=[0, 1, 2, 3]
-                )
-
-        model, example_inputs = Model(), (torch.randn(1, 3, 8, 8),)
-        ep = torch.export.export(model, example_inputs)
-        delegated_program = executorch.exir.to_edge_transform_and_lower(
-            ep,
-            partitioner=[self._coreml_partitioner()],
-        )
-        for node in delegated_program.exported_program().graph.nodes:
-            if node.op == "call_function":
-                assert node.target.__name__ in [
-                    "executorch_call_delegate",
-                    "getitem",
-                ], f"Got unexpected node target after delegation: {node.target.__name__}"
-        et_prog = delegated_program.to_executorch()
-        self._compare_outputs(et_prog, model, example_inputs)
-
 
 if __name__ == "__main__":
     test_runner = TestTorchOps()
@@ -245,4 +223,3 @@ def forward(self, x):
     test_runner.test_dequantize_affine_c8w_embedding_b4w_linear()
     test_runner.test_dequantize_codebook_linear()
     test_runner.test_dequantize_codebook_embedding()
-    test_runner.test__clone_dim_order_contiguous()
```
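While the registrations existed, the op could also be exercised eagerly; a minimal sketch, assuming the (now reverted) definitions in exir/passes/dim_order_ops_registry.py are importable from this path:

```python
import torch
# Assumed import path; the reverted registrations lived in this module.
from executorch.exir.passes import dim_order_ops_registry  # noqa: F401

x = torch.randn(1, 3, 8, 8)
# The CompositeImplicitAutograd impl dispatched to aten.clone, so values match.
y = torch.ops.dim_order_ops._clone_dim_order(x, dim_order=[0, 1, 2, 3])
assert torch.equal(x, y)
```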

backends/arm/_passes/remove_clone_pass.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -14,7 +14,7 @@ class RemoveClonePass(ExportPass):
     """Remove all clones from graph_module"""
 
     def call_operator(self, op, args, kwargs, meta):
-        if op != exir_ops.edge.dim_order_ops._clone_dim_order.default:
+        if op != exir_ops.edge.aten.clone.default:
             return super().call_operator(op, args, kwargs, meta)
 
         if len(args) != 1:
```
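After the revert, the pass once again matches the plain edge clone op. For illustration, a simplified sketch of the same drop-the-clone idea as a plain FX interpreter (a hypothetical class, not the actual ExportPass subclass):

```python
import torch
import torch.fx

class DropClone(torch.fx.Interpreter):
    """Hypothetical illustration: skip clones by forwarding their input."""

    def call_function(self, target, args, kwargs):
        if target is torch.ops.aten.clone.default:
            return args[0]  # a clone is semantically a no-op for value flow
        return super().call_function(target, args, kwargs)
```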

backends/arm/operator_support/__init__.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -6,7 +6,6 @@
 # pyre-unsafe
 
 from . import (  # noqa
-    clone_dim_order_support,
     convolution_support,
     embedding_support,
     ethos_u55_support,
```

backends/arm/operator_support/clone_dim_order_support.py

Lines changed: 0 additions & 76 deletions
This file was deleted.

backends/arm/test/misc/test_partition_decomposed_quantized_ops.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -38,7 +38,7 @@
 ]
 linear_residual_exir_op: list[str] = [
     "executorch_exir_dialects_edge__ops_aten_gelu_default",
-    "executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default",
+    "executorch_exir_dialects_edge__ops_aten_clone_default",
     "executorch_exir_dialects_edge__ops_aten_linear_default",
     "executorch_exir_dialects_edge__ops_aten_add_Tensor",
 ]
```
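The expected-op strings here and in the next file appear to be the edge ops' qualified module paths flattened with underscores; a sketch of that convention, using a hypothetical helper:

```python
def edge_op_string(qualified_name: str) -> str:
    # Hypothetical helper: flatten the edge op's module path with underscores.
    return qualified_name.replace(".", "_")

assert edge_op_string(
    "executorch.exir.dialects.edge._ops.aten.clone.default"
) == "executorch_exir_dialects_edge__ops_aten_clone_default"
```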

backends/arm/test/ops/test_clone.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -23,7 +23,7 @@
 )
 
 aten_op = "torch.ops.aten.clone.default"
-exir_op = "executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default"
+exir_op = "executorch_exir_dialects_edge__ops_aten_clone_default"
 
 input_t = Tuple[torch.Tensor]
 
```
backends/arm/test/passes/test_remove_clone_pass.py

Lines changed: 2 additions & 4 deletions
```diff
@@ -35,11 +35,9 @@ def test_remove_clone_tosa_INT():
         module.get_inputs(),
         quantize=True,
         ops_before_pass={
-            "executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default": 1,
+            "executorch_exir_dialects_edge__ops_aten_clone_default": 1,
         },
-        ops_not_after_pass=[
-            "executorch_exir_dialects_edge__ops_dim_order_ops__clone_dim_order_default"
-        ],
+        ops_not_after_pass=["executorch_exir_dialects_edge__ops_aten_clone_default"],
         pass_list=[RemoveClonePass],
     )
     pipeline.run()
```

exir/passes/dim_order_ops_registry.py

Lines changed: 0 additions & 19 deletions
```diff
@@ -28,14 +28,6 @@
     "_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
 )
 
-lib.define(
-    "_clone_dim_order(Tensor self, *, bool non_blocking=False, int[]? dim_order=None) -> Tensor"
-)
-
-lib.define(
-    "_clone_dim_order.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)"
-)
-
 
 def _op_impl(target, *args, **kwargs):
     kwargs["memory_format"] = get_memory_format(kwargs.get("dim_order", None))
@@ -65,23 +57,12 @@ def _empty_dim_order_out_impl(*args, **kwargs):
     return _op_impl(torch.ops.aten.empty.out, *args, **kwargs)
 
 
-@impl(lib, "_clone_dim_order", "CompositeImplicitAutograd")
-def _clone_dim_order_impl(*args, **kwargs):
-    return _op_impl(torch.ops.aten.clone.default, *args, **kwargs)
-
-
-@impl(lib, "_clone_dim_order.out", "CompositeImplicitAutograd")
-def _clone_dim_order_out_impl(*args, **kwargs):
-    return _op_impl(torch.ops.aten.clone.out, *args, **kwargs)
-
-
 """
 Defines a map of edge ops to the corresponding dim_order ops for quick lookup
 """
 DimOrderOpsMap = {
     exir_ops.edge.aten._to_copy.default: exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
     exir_ops.edge.aten.empty.memory_format: exir_ops.edge.dim_order_ops._empty_dim_order.default,
-    exir_ops.edge.aten.clone.default: exir_ops.edge.dim_order_ops._clone_dim_order.default,
 }
 
 """
```

exir/tests/test_memory_format_ops_pass.py

Lines changed: 0 additions & 52 deletions
```diff
@@ -27,10 +27,7 @@
     AmbiguousDimOrderError,
     MemoryFormatOpsPassTestUtils,
     MemoryFormatTestSet,
-    PropagateToCloneChannelsLastModule,
     PropagateToCopyChannalsLastModule,
-    SimpleCloneChannelsLastModule,
-    SimpleCloneContiguousModule,
     SimpleEmptyChannelLastModule,
     SimpleEmptyContiguoustModule,
     SimpleToCopyChannelsLastModule,
@@ -94,36 +91,6 @@ def test_op_empty_replacement_contiguous(self) -> None:
         ),
     )
 
-    def test_op_clone_replacement_contiguous(self) -> None:
-        model = SimpleCloneContiguousModule()
-        MemoryFormatOpsPassTestUtils.memory_format_test_runner(
-            self,
-            MemoryFormatTestSet(
-                module=model.eval(),
-                op=torch.ops.aten.clone.default,
-                sample_input=(
-                    torch.randn((3, 4, 5, 6)).to(memory_format=torch.channels_last),
-                ),
-                target_memory_format=torch.contiguous_format,
-                _load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
-            ),
-        )
-
-    def test_op_clone_replacement_channels_last(self) -> None:
-        model = SimpleCloneChannelsLastModule()
-        MemoryFormatOpsPassTestUtils.memory_format_test_runner(
-            self,
-            MemoryFormatTestSet(
-                module=model.eval(),
-                op=torch.ops.aten.clone.default,
-                sample_input=(
-                    torch.randn((3, 4, 5, 6)).to(memory_format=torch.contiguous_format),
-                ),
-                target_memory_format=torch.channels_last,
-                _load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
-            ),
-        )
-
     def test_op_dim_order_update(self) -> None:
         MemoryFormatOpsPassTestUtils.memory_format_test_runner(
             self,
@@ -161,25 +128,6 @@ def test_op_dim_order_propagation(self) -> None:
             check_unambiguous_dim_order=True,
         )
 
-    def test_op_clone_dim_order_propagation(self) -> None:
-        MemoryFormatOpsPassTestUtils.memory_format_test_runner(
-            self,
-            MemoryFormatTestSet(
-                module=PropagateToCloneChannelsLastModule().eval(),
-                op=torch.ops.aten.clone.default,
-                sample_input=(
-                    torch.rand_like(
-                        torch.zeros([2, 2, 2, 2]),
-                        dtype=torch.float32,
-                        memory_format=torch.contiguous_format,
-                    ),
-                ),
-                target_memory_format=torch.channels_last,
-                _load_for_executorch_from_buffer=_load_for_executorch_from_buffer,
-            ),
-            check_unambiguous_dim_order=True,
-        )
-
     def test_op_dim_order_propagation_ambiguous(self) -> None:
         try:
             MemoryFormatOpsPassTestUtils.memory_format_test_runner(
```
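The removed tests referenced helper modules defined in the memory-format test utilities; a hypothetical reconstruction of the simplest one, inferred from how the tests above use it:

```python
import torch

class SimpleCloneContiguousModule(torch.nn.Module):
    # Hypothetical reconstruction of the removed test-utility module.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.clone(memory_format=torch.contiguous_format)
```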
