
Commit 6d86fa9

Improve CoreML partitioner logging (#12726)
Improves logging for the CoreML partitioner: nodes that are skipped during partitioning are now logged by default.
1 parent 5867ed1 commit 6d86fa9
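
With the logger level set to INFO inside the module, the skipped-node messages are visible by default. A minimal sketch, assuming the standard library logging API and a logger name inferred from the file path in this diff (the exact module path is an assumption, not taken from the commit), of how a caller could quiet or re-enable them:

import logging

# The partitioner creates its logger via logging.getLogger(__name__); the name
# below is inferred from the file path shown in this commit and may differ.
coreml_partitioner_logger = logging.getLogger(
    "executorch.backends.apple.coreml.partition.coreml_partitioner"
)

# Raise the level to hide the new per-node INFO messages ...
coreml_partitioner_logger.setLevel(logging.WARNING)

# ... or keep it at INFO to see which nodes were skipped and why.
coreml_partitioner_logger.setLevel(logging.INFO)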

File tree

1 file changed: +14 -8 lines changed


backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 14 additions & 8 deletions
@@ -23,25 +23,27 @@
 from torch.fx.passes.operator_support import OperatorSupportBase


 logger = logging.getLogger(__name__)
-logger.setLevel(logging.WARNING)
+logger.setLevel(logging.INFO)


-class OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
+class _OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
     def __init__(
         self,
         skip_ops_for_coreml_delegation: Optional[List[str]] = None,
         lower_full_graph: bool = False,
+        log: bool = False,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
         super().__init__()
         self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
         self.lower_full_graph = lower_full_graph
         self._logged_msgs = set()
+        self._log = log

     def log_once(self, msg: str) -> None:
-        if msg not in self._logged_msgs:
-            logging.info(msg)
+        if self._log and msg not in self._logged_msgs:
+            logger.info(msg)
             self._logged_msgs.add(msg)

     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
@@ -154,8 +156,10 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:

         capability_partitioner = CapabilityBasedPartitioner(
             exported_program.graph_module,
-            OperatorsSupportedForCoreMLBackend(
-                self.skip_ops_for_coreml_delegation, self.lower_full_graph
+            _OperatorsSupportedForCoreMLBackend(
+                self.skip_ops_for_coreml_delegation,
+                self.lower_full_graph,
+                log=True,
             ),
             allows_single_node_partition=True,
         )
@@ -191,8 +195,10 @@ def ops_to_not_decompose(
         self, ep: ExportedProgram
     ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
         do_not_decompose = []
-        op_support = OperatorsSupportedForCoreMLBackend(
-            self.skip_ops_for_coreml_delegation, self.lower_full_graph
+        op_support = _OperatorsSupportedForCoreMLBackend(
+            self.skip_ops_for_coreml_delegation,
+            self.lower_full_graph,
+            log=False,
         )

         # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
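
For reference, a minimal standalone sketch of the deduplicated logging pattern this change settles on: the operator-support checker emits each distinct skip message at most once, and only when constructed with log=True (as in partition()); the instance built in ops_to_not_decompose() passes log=False so the same messages are not printed twice. The class below is a simplified stand-in, not the real _OperatorsSupportedForCoreMLBackend.

import logging

# Configure a handler so INFO messages are actually visible when run directly.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class SkipMessageLogger:
    """Simplified stand-in mirroring the log_once behavior in the diff above."""

    def __init__(self, log: bool = False) -> None:
        self._log = log
        self._logged_msgs = set()

    def log_once(self, msg: str) -> None:
        # Log each distinct message only once, and only if logging is enabled.
        if self._log and msg not in self._logged_msgs:
            logger.info(msg)
            self._logged_msgs.add(msg)


# Usage: the partitioning path logs, the ops_to_not_decompose path stays quiet.
verbose = SkipMessageLogger(log=True)
quiet = SkipMessageLogger(log=False)
verbose.log_once("Skipping node for CoreML delegation")  # logged once
verbose.log_once("Skipping node for CoreML delegation")  # deduplicated, not logged again
quiet.log_once("Skipping node for CoreML delegation")    # suppressed entirely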
