@@ -66,7 +66,7 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
6666 ), "Cannot have skip_ops_for_coreml_delegation when lower_full_graph is True"
6767 return False
6868
69- # TODO: enable this after bugs in to_edge_transform_and_lower are fixed
69+ # TODO: enable this after bugs in ExecuTorch's partitioner are fixed
7070 # # If lower_full_graph=False, do not partition nodes with symbolic args because it can result in symbolic args
7171 # # in the placeholders due to partitioning, which CoreML does not support
7272 # if not self.lower_full_graph and any(
@@ -137,6 +137,7 @@ def __init__(
137137 self.take_over_mutable_buffer = take_over_mutable_buffer
138138 self.lower_full_graph = lower_full_graph
139139 self.take_over_constant_data = take_over_constant_data
140+ self._logged_msgs = set()
140141
141142 def partition(self, exported_program: ExportedProgram) -> PartitionResult:
142143 # Run the CapabilityBasedPartitioner to return the largest possible
@@ -174,14 +175,18 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
174175 tagged_exported_program=exported_program, partition_tags=partition_tags
175176 )
176177
178+ def log_once(self, msg: str) -> None:
179+     if msg not in self._logged_msgs:
180+         logging.info(msg)
181+         self._logged_msgs.add(msg)
182+
177183 def ops_to_not_decompose(
178184     self, ep: ExportedProgram
179185 ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
180186     do_not_decompose = []
181187     op_support = OperatorsSupportedForCoreMLBackend(
182188         self.skip_ops_for_coreml_delegation, self.lower_full_graph
183189     )
184-     _logged_warnings = set()
185190
186191 # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
187192 # TODO: upstream fixes, but pending ET consuming a new published version of coremltools with the
@@ -205,8 +210,7 @@ def ops_to_not_decompose(
205210 except Exception as e:
206211     # CoreML's op_support.is_node_supported will sometimes throw
207212     # for unsupported ops, rather than returning False
208-     warn_str = f"Encountered exception when checking node support: {e}"
209-     if warn_str not in _logged_warnings:
210-         logger.warning(warn_str)
211-         _logged_warnings.add(warn_str)
213+     self.log_once(
214+         f"Encountered exception when checking node support, treating node as unsupported: {e}"
215+     )
212216 return do_not_decompose, None
0 commit comments