
Commit 23e1692

Support dim_order in CoreML (#12985)
Add support for the dim_order op in CoreML. Currently, the dim_order op is skipped, which occasionally leads to lowering / runtime errors, so you often get a better experience by setting _skip_dim_order=True. This will fix the CI failure in trunk / test-models-macos-coreml (emformer_transcribe) / macos-job.
1 parent 275adee commit 23e1692
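
For context, the dim_order op enters the graph during edge lowering: with dim_order enabled (the default), cast-like ops such as aten._to_copy are rewritten into dim_order_ops._to_dim_order_copy, which the CoreML converter previously had no handler for. A minimal sketch, assuming a hypothetical toy module and the standard export / to_edge flow (the exact graph contents depend on the model and configuration):

import torch
from torch.export import export
from executorch.exir import to_edge


class CastModule(torch.nn.Module):  # hypothetical module, only for illustration
    def forward(self, x):
        # .to(dtype) exports as aten._to_copy; with dim_order enabled the edge
        # dialect rewrites it into dim_order_ops._to_dim_order_copy
        return x.to(torch.float64) + 1.0


ep = export(CastModule(), (torch.randn(2, 3),))
edge = to_edge(ep)  # dim_order is on by default, so no _skip_dim_order workaround
print(edge.exported_program().graph)  # expect a dim_order_ops._to_dim_order_copy node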

File tree

2 files changed: +24, -5 lines changed

backends/apple/coreml/compiler/torch_ops.py

Lines changed: 23 additions & 0 deletions
@@ -13,9 +13,11 @@
 from coremltools.converters.mil.frontend import _utils
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
+    _get_kwinputs,
     NUM_TO_NUMPY_DTYPE,
     NUM_TO_TORCH_DTYPE,
     split,
+    to,
     transpose,
     unbind,
 )
@@ -24,6 +26,7 @@
     register_torch_op,
 )
 from coremltools.converters.mil.mil import types
+from executorch.exir.dim_order_utils import get_memory_format


 # https://github.com/apple/coremltools/pull/2556
@@ -44,6 +47,26 @@ def split_copy(context, node):
     split(context, node)


+@register_torch_op(
+    torch_alias=[
+        "dim_order_ops::_to_dim_order_copy",
+        "dim_order_ops._to_dim_order_copy",
+    ],
+    override=False,
+)
+def _to_dim_order_copy(context, node):
+    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
+    node.kwinputs.pop("dim_order")
+
+    # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
+    dim_order = [int(d) for d in dim_order.val]
+    memory_format = get_memory_format(dim_order)
+    assert (
+        memory_format == _torch.contiguous_format
+    ), "Only contiguous memory format is supported in CoreML"
+    to(context, node)
+
+
 # https://github.com/apple/coremltools/pull/2558
 @register_torch_op(
     torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],
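
The new handler only accepts contiguous layouts. As a quick sanity check (not part of the diff), and assuming get_memory_format behaves as in executorch's dim_order_utils, the identity permutation maps back to a contiguous memory format while a channels-last permutation does not:

import torch
from executorch.exir.dim_order_utils import get_memory_format

# The identity permutation is a contiguous layout, so it passes the CoreML check.
assert get_memory_format([0, 1, 2, 3]) == torch.contiguous_format
# A channels-last permutation maps to torch.channels_last and would trip the assert above.
assert get_memory_format([0, 2, 3, 1]) == torch.channels_last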

examples/apple/coreml/llama/export.py

Lines changed: 1 addition & 5 deletions
@@ -21,7 +21,7 @@

 from executorch.exir import to_edge_transform_and_lower
 from executorch.exir.backend.utils import format_delegated_graph
-from executorch.exir.capture._config import EdgeCompileConfig, ExecutorchBackendConfig
+from executorch.exir.capture._config import ExecutorchBackendConfig
 from executorch.exir.passes import MemoryPlanningPass
 from executorch.exir.passes.quant_fusion_pass import QuantFusionPass
 from executorch.exir.passes.sym_shape_eval_pass import ConstraintBasedSymShapeEvalPass
@@ -203,10 +203,6 @@ def main() -> None:
     edge_manager = to_edge_transform_and_lower(
         ep,
         partitioner=[partitioner],
-        compile_config=EdgeCompileConfig(
-            # TODO: fix lowering when dim_order is enabled
-            _skip_dim_order=True,
-        ),
     )

     print("Delegated program")
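
With this change, the llama export example lowers with dim_order enabled. If another model still hits dim_order-related lowering issues, the removed workaround remains available; a sketch, assuming the same entry point as the example above, where ep and partitioner are built by the surrounding script:

from executorch.exir import to_edge_transform_and_lower
from executorch.exir.capture._config import EdgeCompileConfig

edge_manager = to_edge_transform_and_lower(
    ep,  # an already-exported program, as in export.py above
    partitioner=[partitioner],  # e.g. the CoreML partitioner built earlier in the script
    compile_config=EdgeCompileConfig(_skip_dim_order=True),  # opt back out of dim_order ops
)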
