 
 class OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
     def __init__(
-        self, skip_ops_for_coreml_delegation: Optional[List[str]] = None
+        self,
+        skip_ops_for_coreml_delegation: Optional[List[str]] = None,
+        lower_full_graph: bool = False,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
         super().__init__()
         self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
+        self.lower_full_graph = lower_full_graph
+        self._logged_msgs = set()
+
+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)
 
     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
         # get_attr node can always be supported on any backend
@@ -44,14 +53,63 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
             # skip ops if specified by user
             node_target_name = getattr(node.target, "__name__", "").lower()
             if node_target_name in (self.skip_ops_for_coreml_delegation or []):
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is in skip_ops_for_coreml_delegation: "
+                    + node_target_name
+                )
+                assert (
+                    not self.lower_full_graph
+                ), "Cannot have skip_ops_for_coreml_delegation when lower_full_graph is True"
                 return False
+
+            # TODO: enable this after bugs in ExecuTorch's partitioner are fixed
+            # # If lower_full_graph=False, do not partition nodes with symbolic args because it can result in symbolic args
+            # # in the placeholders due to partitioning, which CoreML does not support
+            # if not self.lower_full_graph and any(
+            #     isinstance(arg, torch.fx.Node)
+            #     and isinstance(
+            #         arg.meta.get("val", None),
+            #         (torch.SymInt, torch.SymBool, torch.SymFloat),
+            #     )
+            #     for arg in node.args
+            # ):
+            #     self.log_once(
+            #         "Skipping op for CoreML delegation because it contains symbolic args: "
+            #         + node_target_name
+            #     )
+            #     assert not self.lower_full_graph
+            #     return False
+
             # query coremltools to see if node is supported
-            return ct.converters.mil.frontend.torch.is_torch_fx_node_supported(node)
+            is_supported = ct.converters.mil.frontend.torch.is_torch_fx_node_supported(
+                node
+            )
+            if not is_supported:
+                if self.lower_full_graph:
+                    raise NotImplementedError(
+                        f"""CoreML does not support the op {node_target_name}, but you have set lower_full_graph=True in the CoreMLPartitioner.
+
+Please set lower_full_graph=False in the CoreMLPartitioner to allow running unsupported ops outside of CoreML. Note that setting lower_full_graph=False may affect performance of CoreML and the available features.
+As an alternative to setting lower_full_graph=False, you can try rewriting your model to avoid using this op.
+
+Also consider filing an issue with Apple's coremltools repo to request support for the op: https://github.com/apple/coremltools/issues
+Do not file an issue with ExecuTorch for op support.
+"""
+                    )
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is not supported by CoreML: "
+                    + node_target_name
+                )
+            return is_supported
         # cowardly refuse to support all other types of node:
         # 1. placeholder / output nodes should not be tagged
         #    reference: https://github.com/pytorch/executorch/pull/1398
         # 2. call_module / call_method should have been replaced with call_function?
         else:
+            self.log_once(
+                "Skipping op for CoreML delegation because it is not get_attr or call_function: "
+                + node.op
+            )
             return False
 
 
@@ -62,6 +120,8 @@ def __init__(
         skip_ops_for_coreml_delegation: Optional[List[str]] = None,
         compile_specs: Optional[List[CompileSpec]] = None,
         take_over_mutable_buffer: Optional[bool] = True,
+        lower_full_graph: bool = False,
+        take_over_constant_data: bool = True,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
@@ -71,6 +131,20 @@ def __init__(
             compile_specs=compile_specs if compile_specs is not None else [],
         )
         self.take_over_mutable_buffer = take_over_mutable_buffer
+        self.lower_full_graph = lower_full_graph
+        self.take_over_constant_data = take_over_constant_data
+        self._logged_msgs = set()
+
+        if self.lower_full_graph:
+            assert (
+                len(self.skip_ops_for_coreml_delegation) == 0
+            ), "When lower_full_graph=True, you cannot set skip_ops_for_coreml_delegation"
+            assert (
+                self.take_over_constant_data
+            ), "When lower_full_graph=True, you must set take_over_constant_data=True"
+            assert (
+                self.take_over_mutable_buffer
+            ), "When lower_full_graph=True, you must set take_over_mutable_buffer=True"
 
     def partition(self, exported_program: ExportedProgram) -> PartitionResult:
         # Run the CapabilityBasedPartitioner to return the largest possible
@@ -80,7 +154,9 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
 
         capability_partitioner = CapabilityBasedPartitioner(
             exported_program.graph_module,
-            OperatorsSupportedForCoreMLBackend(self.skip_ops_for_coreml_delegation),
+            OperatorsSupportedForCoreMLBackend(
+                self.skip_ops_for_coreml_delegation, self.lower_full_graph
+            ),
             allows_single_node_partition=True,
         )
         partition_list = capability_partitioner.propose_partitions()
@@ -90,7 +166,8 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
                 node.meta["delegation_tag"] = tag
                 partition_tags[tag] = self.delegation_spec
 
-        tag_constant_data(exported_program)
+        if self.take_over_constant_data:
+            tag_constant_data(exported_program)
         if self.take_over_mutable_buffer:
             logger.info(
                 "Core ML partitioner will take over torch mutable buffer as Core ML state, "
@@ -105,12 +182,18 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
             tagged_exported_program=exported_program, partition_tags=partition_tags
         )
 
+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)
+
     def ops_to_not_decompose(
         self, ep: ExportedProgram
     ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
         do_not_decompose = []
-        op_support = OperatorsSupportedForCoreMLBackend()
-        _logged_warnings = set()
+        op_support = OperatorsSupportedForCoreMLBackend(
+            self.skip_ops_for_coreml_delegation, self.lower_full_graph
+        )
 
         # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
         # TODO: upstream fixes, but pending ET consuming a new published version of coremltools with the
@@ -134,9 +217,7 @@ def ops_to_not_decompose(
             except Exception as e:
                 # CoreML's op_support.is_node_supported will sometimes throw
                 # for unsupported ops, rather than returning False
-                    warn_str = f"Encountered exception when checking node support: {e}"
-                    if warn_str not in _logged_warnings:
-                        logger.warning(warn_str)
-                        _logged_warnings.add(warn_str)
-
+                    self.log_once(
+                        f"Encountered exception when checking node support, treating node as unsupported: {e}"
+                    )
         return do_not_decompose, None
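
A minimal usage sketch for the options this diff adds, assuming the public import paths executorch.backends.apple.coreml.partition.CoreMLPartitioner and executorch.exir.to_edge_transform_and_lower; the model and everything else outside the diff is illustrative, not taken from it:

# Illustrative sketch (not part of this diff): exercising the new
# lower_full_graph option on a tiny exported model.
import torch

from executorch.backends.apple.coreml.partition import CoreMLPartitioner
from executorch.exir import to_edge_transform_and_lower


class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x) + 1.0


exported = torch.export.export(TinyModel(), (torch.randn(2, 4),))

# lower_full_graph=True asks the partitioner to delegate the whole graph and
# raise NotImplementedError if CoreML cannot handle an op; per the asserts in
# __init__, it also requires take_over_constant_data=True,
# take_over_mutable_buffer=True, and no skip_ops_for_coreml_delegation.
partitioner = CoreMLPartitioner(lower_full_graph=True)

edge = to_edge_transform_and_lower(exported, partitioner=[partitioner])
executorch_program = edge.to_executorch()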