2 changes: 2 additions & 0 deletions backends/apple/coreml/TARGETS
@@ -17,6 +17,7 @@ runtime.python_library(
name = "backend",
srcs = glob([
"compiler/*.py",
"logging.py",
]),
visibility = [
"@EXECUTORCH_CLIENTS",
@@ -33,6 +34,7 @@ runtime.python_library(
name = "partitioner",
srcs = glob([
"partition/*.py",
"logging.py",
]),
visibility = [
"@EXECUTORCH_CLIENTS",
9 changes: 5 additions & 4 deletions backends/apple/coreml/compiler/coreml_preprocess.py
@@ -16,20 +16,20 @@

import coremltools as ct
import coremltools.optimize as cto

from executorch.backends.apple.coreml import executorchcoreml
from executorch.backends.apple.coreml.logging import get_coreml_log_level
from executorch.exir.backend.backend_details import (
BackendDetails,
ExportedProgram,
PreprocessResult,
)
from executorch.exir.backend.compile_spec_schema import CompileSpec

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)

from executorch.backends.apple.coreml.compiler.torch_ops import * # noqa: F401, F403

logger = logging.getLogger(__name__)
logger.setLevel(get_coreml_log_level(default_level=logging.WARNING))


class COMPILE_SPEC_KEYS(Enum):
COMPUTE_UNITS = "compute_units"
@@ -409,6 +409,7 @@ def preprocess(
edge_program: ExportedProgram,
compile_specs: List[CompileSpec],
) -> PreprocessResult:
logger.info(f"Edge program: {edge_program}")
model_type: CoreMLBackend.MODEL_TYPE = (
CoreMLBackend.model_type_from_compile_specs(
compile_specs,
4 changes: 2 additions & 2 deletions backends/apple/coreml/compiler/torch_ops.py
@@ -9,7 +9,7 @@
# the op to the coremltools library.

import torch as _torch
from coremltools import _logger as logger
from coremltools import _logger
from coremltools.converters.mil.frontend import _utils
from coremltools.converters.mil.frontend.torch.ops import (
_get_inputs,
@@ -88,7 +88,7 @@ def dequantize_affine(context, node):
out_np_dtype = None
if len(inputs) > 7:
out_np_dtype = NUM_TO_NUMPY_DTYPE[inputs[7].val]
logger.warning(
_logger.warning(
f"Core ML ignores output_dtype {out_np_dtype} on torchao.dequantize_affine and instead uses the native precision."
)

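The rename above drops the public alias `logger` inside torch_ops.py. A plausible motivation (not stated in the diff) is that coreml_preprocess.py pulls this module in via a wildcard import, and a public `logger` name would be re-exported by `import *` and shadow the backend's own `logger`, while an underscore-prefixed `_logger` is not re-exported. A minimal, hypothetical sketch of that behavior, using an invented `fake_torch_ops` module:

```python
import logging
import sys
import types

# Stand-in for a converter module that binds the coremltools logger under two names.
fake_torch_ops = types.ModuleType("fake_torch_ops")
fake_torch_ops.logger = logging.getLogger("coremltools")   # public: re-exported by "import *"
fake_torch_ops._logger = logging.getLogger("coremltools")  # underscore: skipped by "import *"
sys.modules["fake_torch_ops"] = fake_torch_ops

logger = logging.getLogger("my_backend")  # this module's own logger

from fake_torch_ops import *  # noqa: E402, F403 - rebinds the public name "logger"

print(logger.name)             # -> "coremltools", not "my_backend"
print("_logger" in globals())  # -> False: the underscore-prefixed name was not re-exported
```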
24 changes: 24 additions & 0 deletions backends/apple/coreml/logging.py
@@ -0,0 +1,24 @@
# Copyright © 2023 Apple Inc. All rights reserved.
#
# Please refer to the license found in the LICENSE file in the root directory of the source tree.

import logging
import os
from typing import Optional


def get_coreml_log_level(default_level: int) -> int:
level_str = os.environ.get("ET_COREML_LOG_LEVEL", "").upper()
if level_str == "":
return default_level

level_map = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
if level_str not in level_map:
raise ValueError(f"Invalid ET_COREML_LOG_LEVEL: {level_str}")
return level_map[level_str]
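For reference, a short usage sketch of the new helper (hypothetical caller code, assuming executorch is importable): an unset ET_COREML_LOG_LEVEL falls back to the caller's default, recognized values are matched case-insensitively, and anything else raises.

```python
import logging
import os

from executorch.backends.apple.coreml.logging import get_coreml_log_level

# Unset: the caller-supplied default is returned unchanged.
os.environ.pop("ET_COREML_LOG_LEVEL", None)
assert get_coreml_log_level(default_level=logging.WARNING) == logging.WARNING

# Set (case-insensitive): the mapped logging constant is returned.
os.environ["ET_COREML_LOG_LEVEL"] = "debug"
assert get_coreml_log_level(default_level=logging.WARNING) == logging.DEBUG

# Unrecognized values raise rather than being silently ignored.
os.environ["ET_COREML_LOG_LEVEL"] = "VERBOSE"
try:
    get_coreml_log_level(default_level=logging.WARNING)
except ValueError as err:
    print(err)  # Invalid ET_COREML_LOG_LEVEL: VERBOSE
```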
4 changes: 3 additions & 1 deletion backends/apple/coreml/partition/coreml_partitioner.py
@@ -10,6 +10,8 @@
import torch

from executorch.backends.apple.coreml.compiler import CoreMLBackend

from executorch.backends.apple.coreml.logging import get_coreml_log_level
from executorch.exir.backend.compile_spec_schema import CompileSpec

from executorch.exir.backend.partitioner import (
@@ -23,7 +25,7 @@
from torch.fx.passes.operator_support import OperatorSupportBase

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.setLevel(get_coreml_log_level(default_level=logging.INFO))


class _OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
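Note that both coreml_preprocess.py and coreml_partitioner.py resolve the level at import time, so ET_COREML_LOG_LEVEL has to be set before the CoreML backend modules are imported. A hedged sketch of a caller doing that (script structure is illustrative, not from the PR):

```python
import importlib
import logging
import os

# Raise CoreML backend verbosity for this run; must happen before the
# backend modules are imported, because they call logger.setLevel() at import time.
os.environ["ET_COREML_LOG_LEVEL"] = "INFO"

# Attach a root handler so the INFO records are actually emitted somewhere.
logging.basicConfig(level=logging.INFO)

importlib.import_module(
    "executorch.backends.apple.coreml.partition.coreml_partitioner"
)
```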