
Commit 44ee086

Merge remote-tracking branch 'origin/main' into start-pos-api-llava

2 parents 5eab404 + 85ec533

166 files changed (+3736, -1442 lines)

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-36e3dd54effb3f6d13d792029609292fdd5502bb
+40b02a2dc61bbf901a2df91719f47c98d65368ec

.ci/scripts/unittest-macos-cmake.sh

Lines changed: 0 additions & 1 deletion

@@ -11,4 +11,3 @@ ${CONDA_RUN} pytest -n auto --cov=./ --cov-report=xml
 # Run gtest
 LLVM_PROFDATA="xcrun llvm-profdata" LLVM_COV="xcrun llvm-cov" \
   ${CONDA_RUN} test/run_oss_cpp_tests.sh
-${CONDA_RUN} test/check_for_installed_private_headers_in_cmake_out.sh

.github/workflows/apple.yml

Lines changed: 1 addition & 0 deletions

@@ -156,6 +156,7 @@ jobs:
   "kernels_llm"
   "kernels_optimized"
   "kernels_quantized"
+  "kernels_torchao"
   "threadpool"
 )

.github/workflows/build-presets.yml

Lines changed: 14 additions & 4 deletions

@@ -109,7 +109,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        preset: [pybind]
+        preset: [pybind, windows]
     with:
       job-name: build
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -119,14 +119,24 @@
       set -eux
       conda init powershell
       powershell -Command "& {
-        \$ErrorActionPreference = 'Stop'
         Set-PSDebug -Trace 1
+        \$ErrorActionPreference = 'Stop'
+        \$PSNativeCommandUseErrorActionPreference = \$true

         conda create --yes --quiet -n et python=3.12
         conda activate et
-
         python install_requirements.py
-        cmake --preset ${{ matrix.preset }}
+
+        cmake --preset ${{ matrix.preset }} -T ClangCL
+        if (\$LASTEXITCODE -ne 0) {
+          Write-Host "CMake configuration was unsuccessful. Exit code: \$LASTEXITCODE."
+          exit \$LASTEXITCODE
+        }
+
         \$numCores = [System.Environment]::GetEnvironmentVariable('NUMBER_OF_PROCESSORS') - 1
         cmake --build cmake-out -j \$numCores
+        if (\$LASTEXITCODE -ne 0) {
+          Write-Host "CMake build was unsuccessful. Exit code: \$LASTEXITCODE."
+          exit \$LASTEXITCODE
+        }
       }"

.github/workflows/pull.yml

Lines changed: 4 additions & 3 deletions

@@ -406,7 +406,7 @@ jobs:
       output=$(ls -la cmake-out/test/size_test)
       arr=($output)
       size=${arr[4]}
-      threshold="51744"
+      threshold="51752"
      if [[ "$size" -le "$threshold" ]]; then
         echo "Success $size <= $threshold"
       else
@@ -860,8 +860,9 @@ jobs:
       # Run pytest
       PYTHON_EXECUTABLE=python bash backends/nxp/run_unittests.sh

-      # Run aot example:
-      PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh
+      # Run aot examples:
+      PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh cifar10
+      PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh mobilenetv2


   test-vulkan-models-linux:
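For reference, the size-gate step in the first hunk above just compares the on-disk size of the size_test binary against the bumped threshold (51744 -> 51752 bytes). A minimal Python sketch of the same check follows; this is illustrative only, since the CI job does it in bash by parsing `ls -la` output, and only the path and threshold are taken from the diff.

# Illustrative sketch of the size gate above; not the actual CI step.
import os
import sys

binary_path = "cmake-out/test/size_test"  # path from the diff
threshold = 51752  # bytes; raised from 51744 in this commit

size = os.path.getsize(binary_path)
if size <= threshold:
    print(f"Success {size} <= {threshold}")
else:
    print(f"Fail {size} > {threshold}")
    sys.exit(1)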

.github/workflows/stale.yml

Lines changed: 18 additions & 18 deletions

@@ -93,31 +93,31 @@ jobs:
   if (labels.includes("Stale")) {
     core.info(`[${pull.number}] Closing PR.`);
     numAPIRequests += 1;
-    //await github.rest.issues.update({
-    //owner: "pytorch",
-    //repo: "executorch",
-    //issue_number: pull.number,
-    //state: "closed",
-    //});
+    await github.rest.issues.update({
+      owner: "pytorch",
+      repo: "executorch",
+      issue_number: pull.number,
+      state: "closed",
+    });
   } else {
     // For PRs not labeled stale, label them stale.
     core.info(`[${pull.number}] Labeling PR as stale.`);

     numAPIRequests += 1;
-    //await github.rest.issues.createComment({
-    //owner: "pytorch",
-    //repo: "executorch",
-    //issue_number: pull.number,
-    //body: STALE_MESSAGE,
-    //});
+    await github.rest.issues.createComment({
+      owner: "pytorch",
+      repo: "executorch",
+      issue_number: pull.number,
+      body: STALE_MESSAGE,
+    });

     numAPIRequests += 1;
-    //await github.rest.issues.addLabels({
-    //owner: "pytorch",
-    //repo: "executorch",
-    //issue_number: pull.number,
-    //labels: ["Stale"],
-    //});
+    await github.rest.issues.addLabels({
+      owner: "pytorch",
+      repo: "executorch",
+      issue_number: pull.number,
+      labels: ["Stale"],
+    });
   }
 }

.github/workflows/trunk.yml

Lines changed: 39 additions & 0 deletions

@@ -940,3 +940,42 @@ jobs:
       build-mode: Release
       build-tool: cmake
       docker-image: ci-image:executorch-ubuntu-22.04-clang12
+
+  test-mcu-models:
+    name: test-mcu-models
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    strategy:
+      matrix:
+        include:
+          - build-tool: cmake
+      fail-fast: false
+    permissions:
+      id-token: write
+      contents: read
+    with:
+      runner: linux.2xlarge
+      docker-image: ci-image:executorch-ubuntu-22.04-arm-sdk
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        BUILD_TOOL=${{ matrix.build-tool }}
+
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        # Try to mirror these as closely as possible
+        source .ci/scripts/utils.sh
+        install_executorch "--use-pt-pinned-commit"
+
+        .ci/scripts/setup-arm-baremetal-tools.sh
+        source examples/arm/ethos-u-scratch/setup_path.sh
+
+        # Run selective Build
+        chmod +x examples/selective_build/test_selective_build.sh
+        examples/selective_build/test_selective_build.sh "${BUILD_TOOL}"
+
+        # Run MCU models
+        chmod +x examples/arm/run_mcu_models_fvp.sh
+        examples/arm/run_mcu_models_fvp.sh --target=cortex-m55

CMakeLists.txt

Lines changed: 0 additions & 9 deletions

@@ -485,29 +485,24 @@ install(
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/core
   FILES_MATCHING
   PATTERN "*.h"
-  PATTERN "testing_util" EXCLUDE
 )
 install(
   DIRECTORY runtime/executor/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/executor
   FILES_MATCHING
   PATTERN "*.h"
-  PATTERN "test" EXCLUDE
-  PATTERN "platform_memory_allocator.h" EXCLUDE
 )
 install(
   DIRECTORY runtime/kernel/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/kernel
   FILES_MATCHING
   PATTERN "*.h"
-  PATTERN "test" EXCLUDE
 )
 install(
   DIRECTORY runtime/platform/
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/runtime/platform
   FILES_MATCHING
   PATTERN "*.h"
-  PATTERN "test" EXCLUDE
 )
 install(
   DIRECTORY extension/kernel_util/
@@ -592,15 +587,11 @@ endif()

 if(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
-  if(NOT WIN32)
-    set(data_loader_exclude_pattern "*mman_windows.h")
-  endif()
   install(
     DIRECTORY extension/data_loader/
     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/executorch/extension/data_loader
     FILES_MATCHING
     PATTERN "*.h"
-    PATTERN ${data_loader_exclude_pattern} EXCLUDE
   )
   list(APPEND _executorch_extensions extension_data_loader)
 endif()

CMakePresets.json

Lines changed: 15 additions & 0 deletions

@@ -150,6 +150,21 @@
       ]
     }
   },
+  {
+    "name": "windows",
+    "displayName": "Build ExecuTorch for Windows",
+    "inherits": ["common"],
+    "cacheVariables": {
+      "CMAKE_SYSTEM_NAME": "Windows",
+      "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/windows.cmake"
+    },
+    "toolset": "ClangCL",
+    "condition": {
+      "lhs": "${hostSystemName}",
+      "type": "equals",
+      "rhs": "Windows"
+    }
+  },
   {
     "name": "zephyr",
     "displayName": "Build ExecuTorch for Zephyr RTOS",

backends/apple/coreml/compiler/coreml_preprocess.py

Lines changed: 77 additions & 1 deletion

@@ -17,6 +17,10 @@
 import coremltools as ct
 import coremltools.optimize as cto
 from executorch.backends.apple.coreml import executorchcoreml
+from executorch.backends.apple.coreml.compiler.enumerated_shape_utils import (
+    _get_ct_inputs,
+    _SymbolicShapeToEnumeratedShapeMap,
+)
 from executorch.backends.apple.coreml.logging import get_coreml_log_level
 from executorch.exir.backend.backend_details import (
     BackendDetails,
@@ -37,6 +41,7 @@ class COMPILE_SPEC_KEYS(Enum):
     MIN_DEPLOYMENT_TARGET = "min_deployment_target"
     MODEL_COMPUTE_PRECISION = "model_compute_precision"
     OP_LINEAR_QUANTIZER_CONFIG = "op_linear_quantizer_config"
+    ENUMERATED_SHAPES = "enumerated_shapes"


 class MODEL_PATHS(Enum):
@@ -143,7 +148,7 @@ def generate_minimum_deployment_target_compile_spec(
     @staticmethod
     def min_deployment_target_from_compile_specs(
         compile_specs: List[CompileSpec],
-    ) -> ct.target:
+    ) -> Optional[ct.target]:
         """
         Returns the minimum deployment target by parsing the list of compile specs.
         """
@@ -214,6 +219,54 @@ def op_linear_quantizer_config_from_compile_specs(

         return None

+    @staticmethod
+    def generate_enumerated_shapes_compile_spec(
+        ep: ExportedProgram,
+        enumerated_shapes: Dict[str, List[List[int]]],
+    ) -> CompileSpec:
+        """
+        Returns the compile spec representing the model's enumerated shapes.
+        enumerated_shapes is a dictionary mapping each input to its enumerated shapes, e.g.,
+
+        enumerated_shapes = {
+            "x": [[1, 1, 24], [8, 9, 24]],
+            "y": [[1, 6], [30, 6]],
+        }
+
+        means the model can handle x with shape [1, 1, 24] or [8, 9, 24] and y with shape [1, 6] or [30, 6].
+
+        Multiple inputs can only have enumerated shapes when using iOS18 or later.
+        In this case, each input must have the same number of enumerated shapes, and these shapes are tied together
+        by their order in the list. For example, the model above can handle x with shape [1, 1, 24] and y with shape [1, 6],
+        or x with shape [8, 9, 24] and y with shape [30, 6], but not x with shape [1, 1, 24] and y with shape [30, 6].
+
+        Passing incorrect shapes at runtime will result in an error.
+        """
+        emap = _SymbolicShapeToEnumeratedShapeMap.from_exported_program(
+            ep,
+            enumerated_shapes,
+        )
+        str_representation = emap.to_json()
+        byte_representation = str_representation.encode("utf-8")
+        return CompileSpec(
+            COMPILE_SPEC_KEYS.ENUMERATED_SHAPES.value,
+            byte_representation,
+        )
+
+    @staticmethod
+    def enumerated_shapes_from_compile_specs(
+        compile_specs: List[CompileSpec],
+    ) -> Optional[_SymbolicShapeToEnumeratedShapeMap]:
+        """
+        Returns the model's enumerated shapes by parsing the list of compile specs.
+        """
+        for compile_spec in compile_specs:
+            if compile_spec.key == COMPILE_SPEC_KEYS.ENUMERATED_SHAPES.value:
+                emap_json = compile_spec.value.decode("utf-8")
+                emap = _SymbolicShapeToEnumeratedShapeMap.from_json(emap_json)
+                return emap
+        return None
+
     @staticmethod
     def generate_compile_specs(
         compute_unit: ct.ComputeUnit = ct.ComputeUnit.ALL,
@@ -446,6 +499,28 @@ def preprocess(
         op_linear_quantizer_config = (
             CoreMLBackend.op_linear_quantizer_config_from_compile_specs(compile_specs)
         )
+        enumerated_shapes = CoreMLBackend.enumerated_shapes_from_compile_specs(
+            compile_specs
+        )
+
+        # If using enumerated shapes, we need to pass the inputs to CoreML's convert() function
+        # explicitly
+        ct_inputs = None
+        if enumerated_shapes is not None:
+            ct_inputs = _get_ct_inputs(edge_program, enumerated_shapes)
+
+            # Check there are not multiple enumerated inputs if iOS is below 18
+            if (minimum_deployment_target is None) or (
+                minimum_deployment_target < ct.target.iOS18
+            ):
+                n_enumerated_inputs = 0
+                for ct_in in ct_inputs:
+                    if isinstance(ct_in.shape, ct.EnumeratedShapes):
+                        n_enumerated_inputs += 1
+                if n_enumerated_inputs > 1:
+                    raise ValueError(
+                        f"Your program has {n_enumerated_inputs} enumerated inputs, but minimum_deployment_target is set to {minimum_deployment_target}. Multiple enumerated inputs require iOS18 or later."
+                    )

         # Load the model if MODEL_TYPE is 'COMPILED_MODEL'. This step is necessary because
         # get_compiled_model_path() requires a loaded model.
@@ -459,6 +534,7 @@ def preprocess(
             compute_precision=model_compute_precision,
             minimum_deployment_target=minimum_deployment_target,
             compute_units=compute_units,
+            inputs=ct_inputs,
         )

         if op_linear_quantizer_config is not None:
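The new enumerated-shapes compile spec is easiest to see end to end with a usage sketch. The example below is an assumption-laden illustration, not code from this commit: CoreMLBackend, generate_enumerated_shapes_compile_spec, and generate_compile_specs come from the diff above, while the toy model, the torch.export call, the import path, and the minimum_deployment_target keyword are assumptions that may need adjusting to the real API.

# Hypothetical usage sketch (not from this commit). Assumes CoreMLBackend is
# importable from executorch.backends.apple.coreml.compiler and that
# generate_compile_specs() accepts minimum_deployment_target.
import coremltools as ct
import torch
from executorch.backends.apple.coreml.compiler import CoreMLBackend


class Model(torch.nn.Module):
    def forward(self, x, y):
        return x.mean(dim=-1), y.mean(dim=-1)


# Export with dynamic dims so symbolic shapes exist in the ExportedProgram.
dx0 = torch.export.Dim("dx0", min=1, max=8)
dx1 = torch.export.Dim("dx1", min=1, max=9)
dy0 = torch.export.Dim("dy0", min=1, max=30)
ep = torch.export.export(
    Model(),
    (torch.randn(2, 3, 24), torch.randn(4, 6)),
    dynamic_shapes={"x": {0: dx0, 1: dx1}, "y": {0: dy0}},
)

# Multiple enumerated inputs require iOS18+, per the docstring in the diff above.
compile_specs = CoreMLBackend.generate_compile_specs(
    minimum_deployment_target=ct.target.iOS18,
)
compile_specs.append(
    CoreMLBackend.generate_enumerated_shapes_compile_spec(
        ep,
        # Shapes for x and y are tied together by list position:
        # ([1, 1, 24], [1, 6]) or ([8, 9, 24], [30, 6]).
        {"x": [[1, 1, 24], [8, 9, 24]], "y": [[1, 6], [30, 6]]},
    )
)
# compile_specs can then be handed to the CoreML partitioner/backend as usual.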
