Commit 1ac0a8e

Improve CoreML partitioner logging
1 parent a624083 commit 1ac0a8e

File tree

1 file changed: +10 −8 lines

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 10 additions & 8 deletions
@@ -23,25 +23,27 @@
 from torch.fx.passes.operator_support import OperatorSupportBase
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.WARNING)
+logger.setLevel(logging.INFO)
 
 
-class OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
+class _OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
     def __init__(
         self,
         skip_ops_for_coreml_delegation: Optional[List[str]] = None,
         lower_full_graph: bool = False,
+        log: bool = False,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
         super().__init__()
         self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
         self.lower_full_graph = lower_full_graph
         self._logged_msgs = set()
+        self._log = log
 
     def log_once(self, msg: str) -> None:
-        if msg not in self._logged_msgs:
-            logging.info(msg)
+        if self._log and msg not in self._logged_msgs:
+            logger.info(msg)
         self._logged_msgs.add(msg)
 
     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
@@ -154,8 +156,8 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
 
         capability_partitioner = CapabilityBasedPartitioner(
             exported_program.graph_module,
-            OperatorsSupportedForCoreMLBackend(
-                self.skip_ops_for_coreml_delegation, self.lower_full_graph
+            _OperatorsSupportedForCoreMLBackend(
+                self.skip_ops_for_coreml_delegation, self.lower_full_graph, log=True,
             ),
             allows_single_node_partition=True,
         )
@@ -191,8 +193,8 @@ def ops_to_not_decompose(
         self, ep: ExportedProgram
     ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
         do_not_decompose = []
-        op_support = OperatorsSupportedForCoreMLBackend(
-            self.skip_ops_for_coreml_delegation, self.lower_full_graph
+        op_support = _OperatorsSupportedForCoreMLBackend(
+            self.skip_ops_for_coreml_delegation, self.lower_full_graph, log=False,
         )
 
         # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
