Skip to content

Commit 06c38ad

Browse files
authored
Merge branch 'main' into add-dim-order-clone-kernel
2 parents 0100fd4 + 09d5255 commit 06c38ad

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

60 files changed

+1839
-402
lines changed

.ci/scripts/test_model.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,6 @@ test_model_with_qnn() {
201201
EXPORT_SCRIPT=bert
202202
elif [[ "${MODEL_NAME}" == "conv_former" ]]; then
203203
EXPORT_SCRIPT=conv_former
204-
EXTRA_FLAGS="--dataset imagenet-mini/val"
205204
elif [[ "${MODEL_NAME}" == "cvt" ]]; then
206205
EXPORT_SCRIPT=cvt
207206
elif [[ "${MODEL_NAME}" == "distilbert" ]]; then

.ci/scripts/test_qnn_static_llama.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,12 +33,12 @@ echo "Creating tokenizer.bin"
3333
$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
3434

3535
set +e
36-
# Compile only as weight sharing is not applicable on x86
37-
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir . --llama_artifacts . --compile_only
36+
# Compile only as weight sharing is not applicable on x86.
37+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir ./stories_110m_pte_size --llama_artifacts . --compile_only
3838
exit_code1=$?
3939

4040
# Checks accuracy with weight sharing disabled since x86 does not support weight sharing.
41-
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir . --llama_artifacts . --enable_x86_64
41+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir ./stories_110m_accuracy --llama_artifacts . --enable_x86_64
4242
exit_code2=$?
4343

4444
# Check BC

.github/workflows/trunk.yml

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ jobs:
6060
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
6161
strategy:
6262
matrix:
63-
model: [add]
63+
model: [add, softmax, mv2]
6464
fail-fast: false
6565
with:
6666
runner: linux.2xlarge
@@ -72,6 +72,16 @@ jobs:
7272
MODEL_NAME=${{ matrix.model }}
7373
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
7474
conda activate "${CONDA_ENV}"
75+
if [[ ${{ matrix.model}} == "add" ]]; then
76+
SIM_LIMIT_SEC=60
77+
elif [[ ${{ matrix.model}} == "softmax" ]]; then
78+
SIM_LIMIT_SEC=60
79+
elif [[ ${{ matrix.model}} == "mv2" ]]; then
80+
SIM_LIMIT_SEC=5000
81+
else
82+
echo "Failed unsupported model selection ${{ matrix.model }}"
83+
exit 1
84+
fi
7585
7686
source .ci/scripts/utils.sh
7787
source .ci/scripts/zephyr-utils.sh
@@ -118,24 +128,22 @@ jobs:
118128
-C mps3_board.uart0.out_file='sim.out' \
119129
-C cpu0.CFGITCMSZ=15 \
120130
-C cpu0.CFGDTCMSZ=15 \
121-
--simlimit 120
131+
--simlimit ${SIM_LIMIT_SEC}
122132
123133
# Disable exit on error
124134
set +e
125135
# Report failure if any of the output verification checks fail
126-
# store 0 if found (failure), 1 if not (success)
127136
grep -qF "ERROR" sim.out
128-
exit_status=$?
137+
exit_status=$? #store 0 if found (failure), 1 if not (success)
129138
if [[ "$exit_status" -eq "0" ]]; then
130139
cat sim.out
131140
set -e
132141
exit 1
133142
fi
134143
135144
# Report fail if simulation does not complete successfully
136-
# store 0 if found (success), 1 if not (failure)
137145
grep -qF "SUCCESS: Program complete, exiting." sim.out
138-
exit_status=$?
146+
exit_status=$? #store 0 if found (success), 1 if not (failure)
139147
if [[ "$exit_status" -eq "1" ]]; then
140148
cat sim.out
141149
set -e

backends/apple/coreml/TARGETS

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,26 @@ runtime.python_library(
6060
],
6161
)
6262

63+
runtime.python_library(
64+
name = "recipes",
65+
srcs = glob([
66+
"recipes/*.py",
67+
]),
68+
visibility = [
69+
"@EXECUTORCH_CLIENTS",
70+
],
71+
deps = [
72+
"fbsource//third-party/pypi/coremltools:coremltools",
73+
":backend",
74+
"//caffe2:torch",
75+
"//executorch/exir:lib",
76+
"//executorch/exir/backend:compile_spec_schema",
77+
"//executorch/exir/backend:partitioner",
78+
"//executorch/exir/backend:utils",
79+
"//executorch/export:lib",
80+
],
81+
)
82+
6383
runtime.cxx_python_extension(
6484
name = "executorchcoreml",
6585
srcs = [
@@ -103,6 +123,7 @@ runtime.python_test(
103123
"fbsource//third-party/pypi/pytest:pytest",
104124
":partitioner",
105125
":quantizer",
126+
":recipes",
106127
"//caffe2:torch",
107128
"//pytorch/vision:torchvision",
108129
],
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
# Copyright © 2025 Apple Inc. All rights reserved.
2+
#
3+
# Please refer to the license found in the LICENSE file in the root directory of the source tree.
4+
5+
6+
from executorch.export import recipe_registry
7+
8+
from .coreml_recipe_provider import CoreMLRecipeProvider
9+
from .coreml_recipe_types import CoreMLRecipeType
10+
11+
# Auto-register CoreML backend recipe provider
12+
recipe_registry.register_backend_recipe_provider(CoreMLRecipeProvider())
13+
14+
__all__ = [
15+
"CoreMLRecipeProvider",
16+
"CoreMLRecipeType",
17+
]
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
# Copyright © 2025 Apple Inc. All rights reserved.
2+
#
3+
# Please refer to the license found in the LICENSE file in the root directory of the source tree.
4+
5+
6+
from typing import Any, Optional, Sequence
7+
8+
import coremltools as ct
9+
10+
from executorch.backends.apple.coreml.compiler import CoreMLBackend
11+
from executorch.backends.apple.coreml.partition.coreml_partitioner import (
12+
CoreMLPartitioner,
13+
)
14+
from executorch.backends.apple.coreml.recipes.coreml_recipe_types import (
15+
COREML_BACKEND,
16+
CoreMLRecipeType,
17+
)
18+
19+
from executorch.exir import EdgeCompileConfig
20+
from executorch.export import (
21+
BackendRecipeProvider,
22+
ExportRecipe,
23+
LoweringRecipe,
24+
RecipeType,
25+
)
26+
27+
28+
class CoreMLRecipeProvider(BackendRecipeProvider):
    """Supplies CoreML-backed export recipes (FP32 and FP16 variants)."""

    @property
    def backend_name(self) -> str:
        # Matches the backend name the CoreML recipe types advertise.
        return COREML_BACKEND

    def get_supported_recipes(self) -> Sequence[RecipeType]:
        # Every member of the CoreML recipe enum is supported.
        return list(CoreMLRecipeType)

    def create_recipe(
        self, recipe_type: RecipeType, **kwargs: Any
    ) -> Optional[ExportRecipe]:
        """Build an ExportRecipe for *recipe_type*, or None if it is not ours.

        Accepted kwargs: ``minimum_deployment_target`` (ct.target) and
        ``compute_unit`` (ct.ComputeUnit); anything else raises ValueError.
        """
        if recipe_type not in self.get_supported_recipes():
            # Decline unknown recipe types instead of raising, so other
            # registered backend providers may claim them.
            return None

        if ct is None:
            raise ImportError(
                "coremltools is required for CoreML recipes. "
                "Install it with: pip install coremltools"
            )

        self._validate_recipe_kwargs(recipe_type, **kwargs)

        # Map the recipe type onto the coremltools compute precision it selects.
        precision_by_type = {
            CoreMLRecipeType.FP32: ct.precision.FLOAT32,
            CoreMLRecipeType.FP16: ct.precision.FLOAT16,
        }
        chosen_precision = precision_by_type.get(recipe_type)
        if chosen_precision is None:
            raise ValueError(f"Unknown precision for recipe: {recipe_type.value}")

        return self._build_recipe(recipe_type, chosen_precision, **kwargs)

    def _validate_recipe_kwargs(self, recipe_type: RecipeType, **kwargs: Any) -> None:
        """Reject unknown kwargs and kwargs whose values have the wrong enum type."""
        if not kwargs:
            return

        allowed = {"minimum_deployment_target", "compute_unit"}
        unknown = set(kwargs) - allowed
        if unknown:
            raise ValueError(
                f"CoreML Recipes only accept 'minimum_deployment_target' or 'compute_unit' as parameter. "
                f"Unexpected parameters: {list(unknown)}"
            )

        # Type-check each recognized parameter against its coremltools enum.
        for key, enum_cls, type_label in (
            ("minimum_deployment_target", ct.target, "ct.target"),
            ("compute_unit", ct.ComputeUnit, "ct.ComputeUnit"),
        ):
            if key in kwargs and not isinstance(kwargs[key], enum_cls):
                raise ValueError(
                    f"Parameter '{key}' must be an enum of type {type_label}, got {type(kwargs[key])}"
                )

    def _build_recipe(
        self,
        recipe_type: RecipeType,
        precision: ct.precision,
        **kwargs: Any,
    ) -> ExportRecipe:
        """Assemble the ExportRecipe around a CoreML lowering recipe."""
        return ExportRecipe(
            name=recipe_type.value,
            quantization_recipe=None,  # TODO - add quantization recipe
            lowering_recipe=self._get_coreml_lowering_recipe(
                compute_precision=precision,
                **kwargs,
            ),
        )

    def _get_coreml_lowering_recipe(
        self,
        compute_precision: ct.precision,
        **kwargs: Any,
    ) -> LoweringRecipe:
        """Create the partitioner and edge-compile config used for CoreML lowering."""
        specs = CoreMLBackend.generate_compile_specs(
            compute_precision=compute_precision,
            **kwargs,
        )

        # Taking over mutable buffers is disabled for deployment targets older
        # than iOS18 (presumably unsupported there — confirm with backend docs).
        deployment_target = kwargs.get("minimum_deployment_target")
        take_over = not (deployment_target and deployment_target < ct.target.iOS18)

        return LoweringRecipe(
            partitioners=[
                CoreMLPartitioner(
                    compile_specs=specs,
                    take_over_mutable_buffer=take_over,
                )
            ],
            edge_compile_config=EdgeCompileConfig(
                _check_ir_validity=False,
                _skip_dim_order=False,
            ),
        )
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Copyright © 2025 Apple Inc. All rights reserved.
2+
#
3+
# Please refer to the license found in the LICENSE file in the root directory of the source tree.
4+
5+
6+
from executorch.export import RecipeType
7+
8+
9+
# Backend name under which the CoreML recipe provider registers itself.
COREML_BACKEND: str = "coreml"


class CoreMLRecipeType(RecipeType):
    """CoreML-specific generic recipe types"""

    # FP32 generic recipe, defaults to values published by the CoreML backend and partitioner
    # Precision = FP32, Default compute_unit = All (can be overridden by kwargs)
    FP32 = "coreml_fp32"

    # FP16 generic recipe, defaults to values published by the CoreML backend and partitioner
    # Precision = FP16, Default compute_unit = All (can be overridden by kwargs)
    FP16 = "coreml_fp16"

    @classmethod
    def get_backend_name(cls) -> str:
        # All recipes in this enum belong to the CoreML backend.
        return COREML_BACKEND

0 commit comments

Comments
 (0)