 logger.setLevel(logging.INFO)
 
 
+def _is_view_op(op: torch._ops.OpOverload) -> bool:
+    schema = op._schema
+    if len(schema.arguments) == 0:
+        return False
+    alias_info = schema.arguments[0].alias_info
+    return (alias_info is not None) and (not alias_info.is_write)
+
+
 class _OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
     def __init__(
         self,
@@ -119,6 +127,7 @@ class CoreMLPartitioner(Partitioner):
 
     def __init__(
         self,
+        *,
         skip_ops_for_coreml_delegation: Optional[List[str]] = None,
         compile_specs: Optional[List[CompileSpec]] = None,
         take_over_mutable_buffer: Optional[bool] = True,
@@ -209,6 +218,9 @@ def ops_to_not_decompose(
             torch.ops.aten.triu.default,
             # https://github.com/apple/coremltools/blob/release/8.3/coremltools/converters/mil/frontend/torch/ops.py#L6997-L6998
             torch.ops.aten.tril.default,
+            # CoreML's translation of repeat_interleave has poor perf
+            torch.ops.aten.repeat_interleave.self_int,
+            torch.ops.aten.repeat_interleave.self_Tensor,
         ]
         for node in ep.graph.nodes:
             if node.op == "call_function" and isinstance(
@@ -218,6 +230,7 @@ def ops_to_not_decompose(
                 if (
                     op_support.is_node_supported(None, node)
                     and node.target not in do_not_decompose_blocklist
+                    and not _is_view_op(node.target)
                 ):
                     do_not_decompose.append(node.target)
             except Exception as e:
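
For context, here is a minimal standalone sketch of what the new _is_view_op check does, assuming only a stock PyTorch install; the example ops below are illustrative and are not part of this change. ATen view ops such as aten.view annotate their first argument as an alias of the output with no write (view(Tensor(a) self, ...) -> Tensor(a)), so with this patch such ops are filtered out of the ops_to_not_decompose result:

import torch

def _is_view_op(op: torch._ops.OpOverload) -> bool:
    # Mirrors the helper added in this diff: the op is a "view" if its first
    # argument carries alias info (it aliases the output) and is not written to.
    schema = op._schema
    if len(schema.arguments) == 0:
        return False
    alias_info = schema.arguments[0].alias_info
    return (alias_info is not None) and (not alias_info.is_write)

# view(Tensor(a) self, SymInt[] size) -> Tensor(a): aliased, not written
print(_is_view_op(torch.ops.aten.view.default))   # True
# add.Tensor(Tensor self, Tensor other, ...) -> Tensor: no aliasing at all
print(_is_view_op(torch.ops.aten.add.Tensor))     # False
# add_.Tensor(Tensor(a!) self, ...) -> Tensor(a!): aliased but written in place
print(_is_view_op(torch.ops.aten.add_.Tensor))    # False

Separately, the bare * added to CoreMLPartitioner.__init__ makes the constructor arguments keyword-only, so callers must pass them by name (e.g. CoreMLPartitioner(compile_specs=...)) rather than positionally.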