99# the op to the coremltools library.
1010
1111import torch as _torch
12- from coremltools import _logger as logger
12+ from coremltools import _logger
1313from coremltools .converters .mil .frontend import _utils
1414from coremltools .converters .mil .frontend .torch .ops import (
1515 _get_inputs ,
16+ _get_kwinputs ,
1617 NUM_TO_NUMPY_DTYPE ,
1718 NUM_TO_TORCH_DTYPE ,
1819 split ,
20+ to ,
1921 transpose ,
2022 unbind ,
2123)
2426 register_torch_op ,
2527)
2628from coremltools .converters .mil .mil import types
29+ from executorch .exir .dim_order_utils import get_memory_format
2730
2831
2932# https://github.com/apple/coremltools/pull/2556
@@ -44,6 +47,26 @@ def split_copy(context, node):
4447 split (context , node )
4548
4649
@register_torch_op(
    torch_alias=[
        "dim_order_ops::_to_dim_order_copy",
        "dim_order_ops._to_dim_order_copy",
    ],
    override=False,
)
def _to_dim_order_copy(context, node):
    """Lower ExecuTorch's ``_to_dim_order_copy`` to the standard ``to`` op.

    Core ML only supports the contiguous (channels-first) memory layout, so
    the ``dim_order`` keyword is validated and stripped before delegating to
    the regular ``to`` conversion.

    Args:
        context: conversion context holding the MIL graph being built.
        node: the torch IR node being converted; its ``kwinputs`` may carry
            a ``dim_order`` entry.

    Raises:
        ValueError: if ``dim_order`` maps to a non-contiguous memory format.
    """
    # ``dim_order`` may legitimately be absent, in which case the op defaults
    # to contiguous order and there is nothing to validate.
    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
    # Strip the kwarg so the downstream ``to`` handler never sees it. Use a
    # pop default so a missing key does not raise KeyError (the original
    # unconditional pop contradicted the ``default=[None]`` above).
    node.kwinputs.pop("dim_order", None)

    if dim_order is not None:
        # In CoreML, dim_order.val will be an ndarray, so we convert it to a
        # list of plain ints before mapping it to a torch memory format.
        dim_order = [int(d) for d in dim_order.val]
        memory_format = get_memory_format(dim_order)
        # Raise (not assert) so the check survives ``python -O``.
        if memory_format != _torch.contiguous_format:
            raise ValueError(
                "Only contiguous memory format is supported in CoreML"
            )
    to(context, node)
69+
4770# https://github.com/apple/coremltools/pull/2558
4871@register_torch_op (
4972 torch_alias = ["torchao::dequantize_affine" , "torchao.dequantize_affine" ],
@@ -88,7 +111,7 @@ def dequantize_affine(context, node):
88111 out_np_dtype = None
89112 if len (inputs ) > 7 :
90113 out_np_dtype = NUM_TO_NUMPY_DTYPE [inputs [7 ].val ]
91- logger .warning (
114+ _logger .warning (
92115 f"Core ML ignores output_dtype { out_np_dtype } on torchao.dequantize_affine and instead uses the native precision."
93116 )
94117
0 commit comments