
Commit 59f2a23

Merge branch 'gh/ahmtox/41/orig' into gh/ahmtox/45/orig
2 parents f45409e + b7b0604


8 files changed: +62 -14 lines changed

backends/apple/coreml/TARGETS

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,7 @@ runtime.python_library(
     name = "backend",
     srcs = glob([
         "compiler/*.py",
+        "logging.py",
     ]),
     visibility = [
         "@EXECUTORCH_CLIENTS",
@@ -33,6 +34,7 @@ runtime.python_library(
     name = "partitioner",
     srcs = glob([
         "partition/*.py",
+        "logging.py",
     ]),
     visibility = [
         "@EXECUTORCH_CLIENTS",

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 5 additions & 4 deletions
@@ -16,20 +16,20 @@
 
 import coremltools as ct
 import coremltools.optimize as cto
-
 from executorch.backends.apple.coreml import executorchcoreml
+from executorch.backends.apple.coreml.logging import get_coreml_log_level
 from executorch.exir.backend.backend_details import (
     BackendDetails,
     ExportedProgram,
     PreprocessResult,
 )
 from executorch.exir.backend.compile_spec_schema import CompileSpec
 
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.WARNING)
-
 from executorch.backends.apple.coreml.compiler.torch_ops import *  # noqa: F401, F403
 
+logger = logging.getLogger(__name__)
+logger.setLevel(get_coreml_log_level(default_level=logging.WARNING))
+
 
 class COMPILE_SPEC_KEYS(Enum):
     COMPUTE_UNITS = "compute_units"
@@ -409,6 +409,7 @@ def preprocess(
         edge_program: ExportedProgram,
         compile_specs: List[CompileSpec],
     ) -> PreprocessResult:
+        logger.info(f"Edge program: {edge_program}")
         model_type: CoreMLBackend.MODEL_TYPE = (
             CoreMLBackend.model_type_from_compile_specs(
                 compile_specs,
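
Note that coreml_preprocess.py applies get_coreml_log_level to its logger at import time, so ET_COREML_LOG_LEVEL must be set before the backend is imported. A minimal sketch (the surrounding export script is assumed, not part of this commit) of surfacing the new edge-program dump:

import logging
import os

# Set before importing the Core ML backend: coreml_preprocess.py calls
# logger.setLevel(get_coreml_log_level(...)) when the module is imported.
os.environ["ET_COREML_LOG_LEVEL"] = "INFO"

from executorch.backends.apple.coreml.compiler import CoreMLBackend  # noqa: E402

# Configure a root handler so INFO records are actually emitted; any
# lowering that reaches CoreMLBackend.preprocess will then log the
# "Edge program: ..." line added in this commit.
logging.basicConfig(level=logging.INFO)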

backends/apple/coreml/compiler/torch_ops.py

Lines changed: 25 additions & 2 deletions
@@ -9,13 +9,15 @@
 # the op to the coremltools library.
 
 import torch as _torch
-from coremltools import _logger as logger
+from coremltools import _logger
 from coremltools.converters.mil.frontend import _utils
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
+    _get_kwinputs,
     NUM_TO_NUMPY_DTYPE,
     NUM_TO_TORCH_DTYPE,
     split,
+    to,
     transpose,
     unbind,
 )
@@ -24,6 +26,7 @@
     register_torch_op,
 )
 from coremltools.converters.mil.mil import types
+from executorch.exir.dim_order_utils import get_memory_format
 
 
 # https://github.com/apple/coremltools/pull/2556
@@ -44,6 +47,26 @@ def split_copy(context, node):
     split(context, node)
 
 
+@register_torch_op(
+    torch_alias=[
+        "dim_order_ops::_to_dim_order_copy",
+        "dim_order_ops._to_dim_order_copy",
+    ],
+    override=False,
+)
+def _to_dim_order_copy(context, node):
+    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
+    node.kwinputs.pop("dim_order")
+
+    # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
+    dim_order = [int(d) for d in dim_order.val]
+    memory_format = get_memory_format(dim_order)
+    assert (
+        memory_format == _torch.contiguous_format
+    ), "Only contiguous memory format is supported in CoreML"
+    to(context, node)
+
+
 # https://github.com/apple/coremltools/pull/2558
 @register_torch_op(
     torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],
@@ -88,7 +111,7 @@ def dequantize_affine(context, node):
     out_np_dtype = None
     if len(inputs) > 7:
         out_np_dtype = NUM_TO_NUMPY_DTYPE[inputs[7].val]
-        logger.warning(
+        _logger.warning(
             f"Core ML ignores output_dtype {out_np_dtype} on torchao.dequantize_affine and instead uses the native precision."
         )
 
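
The new handler strips the dim_order kwarg, checks that the requested order is contiguous, and then defers to coremltools' stock to translation. A quick illustration (not part of the commit) of the get_memory_format mapping it relies on, assuming the helper accepts a plain list of ints as the [int(d) for d in dim_order.val] conversion above suggests:

import torch
from executorch.exir.dim_order_utils import get_memory_format

# The identity permutation is the only order the new handler accepts.
assert get_memory_format([0, 1, 2, 3]) == torch.contiguous_format

# A channels-last order maps to torch.channels_last and would trip the
# handler's assertion, since only contiguous layouts are supported here.
assert get_memory_format([0, 2, 3, 1]) == torch.channels_last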

backends/apple/coreml/logging.py

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+# Copyright © 2023 Apple Inc. All rights reserved.
+#
+# Please refer to the license found in the LICENSE file in the root directory of the source tree.
+
+import logging
+import os
+from typing import Optional
+
+
+def get_coreml_log_level(default_level: int) -> Optional[int]:
+    level_str = os.environ.get("ET_COREML_LOG_LEVEL", "").upper()
+    if level_str == "":
+        return default_level
+
+    level_map = {
+        "DEBUG": logging.DEBUG,
+        "INFO": logging.INFO,
+        "WARNING": logging.WARNING,
+        "ERROR": logging.ERROR,
+        "CRITICAL": logging.CRITICAL,
+    }
+    if level_str not in level_map:
+        raise ValueError(f"Invalid ET_COREML_LOG_LEVEL: {level_str}")
+    return level_map[level_str]
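
A usage sketch for the new helper (the behavior follows directly from the code above):

import logging
import os

from executorch.backends.apple.coreml.logging import get_coreml_log_level

# With the variable unset, the caller's default wins.
os.environ.pop("ET_COREML_LOG_LEVEL", None)
assert get_coreml_log_level(default_level=logging.WARNING) == logging.WARNING

# With the variable set, the environment overrides the default,
# case-insensitively; an unrecognized value raises ValueError.
os.environ["ET_COREML_LOG_LEVEL"] = "debug"
assert get_coreml_log_level(default_level=logging.WARNING) == logging.DEBUG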

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 3 additions & 1 deletion
@@ -10,6 +10,8 @@
 import torch
 
 from executorch.backends.apple.coreml.compiler import CoreMLBackend
+
+from executorch.backends.apple.coreml.logging import get_coreml_log_level
 from executorch.exir.backend.compile_spec_schema import CompileSpec
 
 from executorch.exir.backend.partitioner import (
@@ -23,7 +25,7 @@
 from torch.fx.passes.operator_support import OperatorSupportBase
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
+logger.setLevel(get_coreml_log_level(default_level=logging.INFO))
 
 
 def _is_view_op(op: torch._ops.OpOverload) -> bool:

backends/vulkan/quantizer/vulkan_quantizer_utils.py

Lines changed: 1 addition & 1 deletion
@@ -220,4 +220,4 @@ def _convert_scalars_to_attrs(model: torch.fx.GraphModule) -> torch.fx.GraphModule:
             new_args.append(get_attr_node)
         n.args = tuple(new_args)
     model.recompile()
-    return model
\ No newline at end of file
+    return model

backends/vulkan/test/op_tests/quantize_affine_test.cpp

Lines changed: 1 addition & 1 deletion
@@ -1376,4 +1376,4 @@ TEST(VulkanChooseQParamsAffineTest, test_symmetric_no_clipping_narrow_range) {
       10, // quant_max (narrow range)
       1e-5, // eps
       at::kFloat); // input dtype
-}
\ No newline at end of file
+}

examples/apple/coreml/llama/export.py

Lines changed: 1 addition & 5 deletions
@@ -21,7 +21,7 @@
 
 from executorch.exir import to_edge_transform_and_lower
 from executorch.exir.backend.utils import format_delegated_graph
-from executorch.exir.capture._config import EdgeCompileConfig, ExecutorchBackendConfig
+from executorch.exir.capture._config import ExecutorchBackendConfig
 from executorch.exir.passes import MemoryPlanningPass
 from executorch.exir.passes.quant_fusion_pass import QuantFusionPass
 from executorch.exir.passes.sym_shape_eval_pass import ConstraintBasedSymShapeEvalPass
@@ -203,10 +203,6 @@ def main() -> None:
     edge_manager = to_edge_transform_and_lower(
         ep,
         partitioner=[partitioner],
-        compile_config=EdgeCompileConfig(
-            # TODO: fix lowering when dim_order is enabled
-            _skip_dim_order=True,
-        ),
     )
 
     print("Delegated program")
