
Commit 7cc8c68

Merge branch 'main' into io_params_1
2 parents 2b373a7 + b793d45

124 files changed (+6726, −1215 lines)

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-90f1e7bed15ca5e48c61c5b6dc5ad4810524f82f
+ab43fe4bdf5ccd82897f0e982c451a0127bd175e

.github/workflows/trunk.yml

Lines changed: 31 additions & 0 deletions
@@ -302,6 +302,37 @@ jobs:
         exit 1
       fi

+  test-arm-ootb-linux:
+    name: test-arm-ootb-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-arm-sdk
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        # Follow the steps required before running the notebooks
+        # Try to mirror these as closely as possible
+        source .ci/scripts/utils.sh
+        install_executorch "--use-pt-pinned-commit"
+
+        .ci/scripts/setup-arm-baremetal-tools.sh
+        source examples/arm/ethos-u-scratch/setup_path.sh
+
+        # Install requirements for converting notebooks
+        pip install notebook
+
+        # Run OOTB tests
+        backends/arm/test/test_arm_ootb.sh
+
   test-coreml-delegate:
     name: test-coreml-delegate
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
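Note on the script above: the jq one-liner asks conda for its environments as JSON and activates the last one listed, which (per the comment in the script) is the environment set up by the Docker image rather than conda's base env. A minimal Python sketch of the same selection logic, assuming conda is on PATH:

    # Pick the last env from `conda env list --json`, as the workflow script does.
    import json
    import subprocess

    result = subprocess.run(
        ["conda", "env", "list", "--json"], capture_output=True, text=True, check=True
    )
    envs = json.loads(result.stdout)["envs"]
    print(envs[-1])  # the path the script passes to `conda activate`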

CMakeLists.txt

Lines changed: 11 additions & 8 deletions
@@ -45,11 +45,11 @@
 # ~~~
 #

-cmake_minimum_required(VERSION 3.24)
+# TODO Lower to 3.24 when XNNPACK dependency is updated to include
+# https://github.com/google/XNNPACK/commit/c690daa67f883e1b627aadf7684c06797e9a0684
+cmake_minimum_required(VERSION 3.29)
 project(executorch)

-# MARK: - Start EXECUTORCH_H12025_BUILD_MIGRATION
-
 include(${PROJECT_SOURCE_DIR}/tools/cmake/common/preset.cmake)
 include(${PROJECT_SOURCE_DIR}/tools/cmake/Utils.cmake)
 include(CMakeDependentOption)
@@ -82,6 +82,7 @@ announce_configured_options(BUCK2)

 announce_configured_options(CMAKE_CXX_COMPILER_ID)
 announce_configured_options(CMAKE_TOOLCHAIN_FILE)
+announce_configured_options(BUILD_TESTING)

 load_build_preset()
 include(${PROJECT_SOURCE_DIR}/tools/cmake/preset/default.cmake)
@@ -97,11 +98,6 @@ else()
 endif()
 announce_configured_options(CCACHE_PROGRAM)

-# Print all the configs that were called with announce_configured_options.
-print_configured_options()
-
-# MARK: - End EXECUTORCH_H12025_BUILD_MIGRATION
-
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

 # Setup RPATH. See
@@ -566,6 +562,10 @@ if(EXECUTORCH_BUILD_EXTENSION_LLM)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizers)
 endif()

+if(EXECUTORCH_BUILD_EXTENSION_LLM_APPLE)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/apple)
+endif()
+
 if(EXECUTORCH_BUILD_EXTENSION_LLM_RUNNER)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/runner)
 endif()
@@ -750,3 +750,6 @@ if(EXECUTORCH_BUILD_ANDROID_JNI)
 endif()

 include(Test.cmake)
+
+# Print all the configs that were called with announce_configured_options.
+print_configured_options()
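Note on the relocated call: announce_configured_options records each option as it is configured, and print_configured_options reports only what has been recorded by the time it runs, so moving it from mid-file to the last line lets the summary capture options announced by everything included above it. A rough Python analogue of the pattern (hypothetical names mirroring the CMake helpers, not their actual implementation):

    # Announcements accumulate in a registry; the summary printed at the end
    # includes only what was announced before the print call runs.
    _announced: dict[str, str] = {}

    def announce_configured_options(name: str, value: str) -> None:
        _announced[name] = value

    def print_configured_options() -> None:
        for name, value in sorted(_announced.items()):
            print(f"  {name}: {value}")

    announce_configured_options("BUILD_TESTING", "ON")
    announce_configured_options("CMAKE_TOOLCHAIN_FILE", "ios.toolchain.cmake")
    print_configured_options()  # placed last, it sees every announcement above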

CMakePresets.json

Lines changed: 10 additions & 17 deletions
@@ -8,14 +8,15 @@
     },
     {
       "name": "macos",
-      "displayName": "Build everything buildable on macOS",
+      "displayName": "Build ExecuTorch for macOS",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
         "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/third-party/ios-cmake/ios.toolchain.cmake",
         "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/macos.cmake",
         "PLATFORM": "MAC_ARM64",
-        "DEPLOYMENT_TARGET": "12.0"
+        "DEPLOYMENT_TARGET": "12.0",
+        "CMAKE_MACOSX_BUNDLE": "OFF"
       },
       "condition": {
         "lhs": "${hostSystemName}",
@@ -25,7 +26,7 @@
     },
     {
       "name": "ios",
-      "displayName": "Build everything buildable on iOS",
+      "displayName": "Build ExecuTorch for iOS",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
@@ -42,7 +43,7 @@
     },
     {
       "name": "ios-simulator",
-      "displayName": "Build everything buildable on iOS simulator",
+      "displayName": "Build ExecuTorch for iOS Simulator",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
@@ -59,7 +60,7 @@
     },
     {
       "name": "linux",
-      "displayName": "Build everything buildable on Linux",
+      "displayName": "Build ExecuTorch for Linux",
       "inherits": ["common"],
       "cacheVariables": {
         "CMAKE_SYSTEM_NAME": "Linux",
@@ -88,29 +89,21 @@
     {
       "name": "llm",
       "displayName": "Build LLM libraries",
-      "inherits": [
-        "common"
-      ],
+      "inherits": ["common"],
       "cacheVariables": {
         "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/llm.cmake",
         "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
       },
       "condition": {
         "type": "inList",
         "string": "${hostSystemName}",
-        "list": [
-          "Darwin",
-          "Linux",
-          "Windows"
-        ]
+        "list": ["Darwin", "Linux", "Windows"]
       }
     },
     {
       "name": "zephyr",
-      "displayName": "Build everything buildable on Zephyr RTOS",
-      "inherits": [
-        "common"
-      ],
+      "displayName": "Build ExecuTorch for Zephyr RTOS",
+      "inherits": ["common"],
       "cacheVariables": {
         "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/zephyr.cmake",
         "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/examples/zephyr/x86_64-linux-arm-zephyr-eabi-gcc.cmake"

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 2 additions & 2 deletions
@@ -365,7 +365,7 @@ def preprocess_model(

         match model_type:
             case CoreMLBackend.MODEL_TYPE.COMPILED_MODEL:
-                shutil.rmtree(str(model_path.resolve()))
+                shutil.rmtree(str(model_path.resolve()), ignore_errors=True)
                 model_path = model_dir_path / MODEL_PATHS.COMPILED_MODEL.value
                 compiled_model_path = mlmodel.get_compiled_model_path()
                 shutil.move(
@@ -396,7 +396,7 @@ def preprocess_model(
                 for key, value in model_debug_info.debugSymbolToHandles.items()
             }

-        shutil.rmtree(str(dir_path.resolve()))
+        shutil.rmtree(str(dir_path.resolve()), ignore_errors=True)
         return PreprocessResult(
             processed_bytes=processed_bytes,
             debug_handle_map=debug_handle_map,

backends/apple/coreml/partition/coreml_partitioner.py

Lines changed: 92 additions & 11 deletions
@@ -28,12 +28,21 @@

 class OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
     def __init__(
-        self, skip_ops_for_coreml_delegation: Optional[List[str]] = None
+        self,
+        skip_ops_for_coreml_delegation: Optional[List[str]] = None,
+        lower_full_graph: bool = False,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
         super().__init__()
         self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
+        self.lower_full_graph = lower_full_graph
+        self._logged_msgs = set()
+
+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)

     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
         # get_attr node can always be supported on any backend
@@ -44,14 +53,63 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
             # skip ops if specified by user
             node_target_name = getattr(node.target, "__name__", "").lower()
             if node_target_name in (self.skip_ops_for_coreml_delegation or []):
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is in skip_ops_for_coreml_delegation: "
+                    + node_target_name
+                )
+                assert (
+                    not self.lower_full_graph
+                ), "Cannot have skip_ops_for_coreml_delegation when lower_full_graph is True"
                 return False
+
+            # TODO: enable this after bugs in ExecuTorch's partitioner are fixed
+            # # If lower_full_graph=False, do not partition nodes with symbolic args because it can result in symbolic args
+            # # in the placeholders due to partitioning, which CoreML does not support
+            # if not self.lower_full_graph and any(
+            #     isinstance(arg, torch.fx.Node)
+            #     and isinstance(
+            #         arg.meta.get("val", None),
+            #         (torch.SymInt, torch.SymBool, torch.SymFloat),
+            #     )
+            #     for arg in node.args
+            # ):
+            #     self.log_once(
+            #         "Skipping op for CoreML delegation because it contains symbolic args: "
+            #         + node_target_name
+            #     )
+            #     assert not self.lower_full_graph
+            #     return False
+
             # query coremltools to see if node is supported
-            return ct.converters.mil.frontend.torch.is_torch_fx_node_supported(node)
+            is_supported = ct.converters.mil.frontend.torch.is_torch_fx_node_supported(
+                node
+            )
+            if not is_supported:
+                if self.lower_full_graph:
+                    raise NotImplementedError(
+                        f"""CoreML does not support the op {node_target_name}, but you have set lower_full_graph=True in the CoreMLPartitioner.
+
+Please set lower_full_graph=False in the CoreMLPartitioner to allow running unsupported ops outside of CoreML. Note that setting lower_full_graph=False may affect performance of CoreML and the available features.
+As an alternative to setting lower_full_graph=False, you can try rewriting your model to avoid using this op.
+
+Also consider filing an issue with Apple's coremltools repo to request support for the op: https://github.com/apple/coremltools/issues
+Do not file an issue with ExecuTorch for op support.
+"""
+                    )
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is not supported by CoreML: "
+                    + node_target_name
+                )
+            return is_supported
         # cowardly refuse to support all other types of node:
         # 1. placeholder / output nodes should not be tagged
         #    reference: https://github.com/pytorch/executorch/pull/1398
         # 2. call_module / call_method should have been replaced with call_function?
         else:
+            self.log_once(
+                "Skipping op for CoreML delegation because it is not get_attr or call_function: "
+                + node.op
+            )
             return False
@@ -62,6 +120,8 @@ def __init__(
         skip_ops_for_coreml_delegation: Optional[List[str]] = None,
         compile_specs: Optional[List[CompileSpec]] = None,
         take_over_mutable_buffer: Optional[bool] = True,
+        lower_full_graph: bool = False,
+        take_over_constant_data: bool = True,
     ) -> None:
         if skip_ops_for_coreml_delegation is None:
             skip_ops_for_coreml_delegation = []
@@ -71,6 +131,20 @@ def __init__(
             compile_specs=compile_specs if compile_specs is not None else [],
         )
         self.take_over_mutable_buffer = take_over_mutable_buffer
+        self.lower_full_graph = lower_full_graph
+        self.take_over_constant_data = take_over_constant_data
+        self._logged_msgs = set()
+
+        if self.lower_full_graph:
+            assert (
+                len(self.skip_ops_for_coreml_delegation) == 0
+            ), "When lower_full_graph=True, you cannot set skip_ops_for_coreml_delegation"
+            assert (
+                self.take_over_constant_data
+            ), "When lower_full_graph=True, you must set take_over_constant_data=True"
+            assert (
+                self.take_over_mutable_buffer
+            ), "When lower_full_graph=True, you must set take_over_mutable_buffer=True"

     def partition(self, exported_program: ExportedProgram) -> PartitionResult:
         # Run the CapabilityBasedPartitioner to return the largest possible
@@ -80,7 +154,9 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:

         capability_partitioner = CapabilityBasedPartitioner(
             exported_program.graph_module,
-            OperatorsSupportedForCoreMLBackend(self.skip_ops_for_coreml_delegation),
+            OperatorsSupportedForCoreMLBackend(
+                self.skip_ops_for_coreml_delegation, self.lower_full_graph
+            ),
             allows_single_node_partition=True,
         )
         partition_list = capability_partitioner.propose_partitions()
@@ -90,7 +166,8 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
                 node.meta["delegation_tag"] = tag
                 partition_tags[tag] = self.delegation_spec

-        tag_constant_data(exported_program)
+        if self.take_over_constant_data:
+            tag_constant_data(exported_program)
         if self.take_over_mutable_buffer:
             logger.info(
                 "Core ML partitioner will take over torch mutable buffer as Core ML state, "
@@ -105,12 +182,18 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
             tagged_exported_program=exported_program, partition_tags=partition_tags
         )

+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)
+
     def ops_to_not_decompose(
         self, ep: ExportedProgram
     ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
         do_not_decompose = []
-        op_support = OperatorsSupportedForCoreMLBackend()
-        _logged_warnings = set()
+        op_support = OperatorsSupportedForCoreMLBackend(
+            self.skip_ops_for_coreml_delegation, self.lower_full_graph
+        )

         # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
         # TODO: upstream fixes, but pending ET consuming a new published version of coremltools with the
@@ -134,9 +217,7 @@ def ops_to_not_decompose(
             except Exception as e:
                 # CoreML's op_support.is_node_supported will sometimes throw
                 # for unsupported ops, rather than returning False
-                warn_str = f"Encountered exception when checking node support: {e}"
-                if warn_str not in _logged_warnings:
-                    logger.warning(warn_str)
-                    _logged_warnings.add(warn_str)
-
+                self.log_once(
+                    f"Encountered exception when checking node support, treating node as unsupported: {e}"
+                )
         return do_not_decompose, None