
Commit 7926335

Merge branch 'main' into wasm-bindings-js
2 parents 1040b15 + 37e3003

File tree: 36 files changed, +2213 −94 lines

.ci/scripts/test_model.sh

Lines changed: 17 additions & 6 deletions
@@ -232,21 +232,24 @@ test_model_with_qnn() {
 # @param should_test If true, build and test the model using the coreml_executor_runner.
 test_model_with_coreml() {
   local should_test="$1"
+  local test_with_pybindings="$2"
+  local dtype="$3"

   if [[ "${BUILD_TOOL}" != "cmake" ]]; then
     echo "coreml only supports cmake."
     exit 1
   fi

-  DTYPE=float16
+  RUN_WITH_PYBINDINGS=""
+  if [[ "${test_with_pybindings}" == true ]]; then
+    echo \"Running with pybindings\"
+    export RUN_WITH_PYBINDINGS="--run_with_pybindings"
+  fi

-  "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}" --use_partitioner
+  "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision ${dtype} --use_partitioner ${RUN_WITH_PYBINDINGS}
   EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)

   if [ -n "$EXPORTED_MODEL" ]; then
-    EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
-    mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
-    EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
     echo "OK exported model: $EXPORTED_MODEL"
   else
     echo "[error] failed to export model: no .pte file found"

@@ -303,7 +306,15 @@ elif [[ "${BACKEND}" == *"coreml"* ]]; then
   if [[ "${BACKEND}" == *"test"* ]]; then
     should_test_coreml=true
   fi
-  test_model_with_coreml "${should_test_coreml}"
+  test_with_pybindings=false
+  if [[ "${BACKEND}" == *"pybind"* ]]; then
+    test_with_pybindings=true
+  fi
+  dtype=float16
+  if [[ "${BACKEND}" == *"float32"* ]]; then
+    dtype=float32
+  fi
+  test_model_with_coreml "${should_test_coreml}" "${test_with_pybindings}" "${dtype}"
   if [[ $? -eq 0 ]]; then
     prepare_artifacts_upload
   fi
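
With this change the BACKEND argument is parsed for substrings (coreml, test, pybind, float32), so a single string selects the whole configuration. A minimal sketch of a local invocation, assuming the script is run from the repository root the same way CI calls it (the model and substring combination here are illustrative only):

    # Illustrative only: export mv3 via CoreML, run it through pybindings,
    # run the runtime test, and use float32 compute precision.
    PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "mv3" "cmake" "coreml-pybind-test-float32"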

.ci/scripts/test_qnn_static_llama.sh

Lines changed: 9 additions & 1 deletion
@@ -41,6 +41,10 @@ exit_code1=$?
 $PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir . --llama_artifacts . --enable_x86_64
 exit_code2=$?

+# Check BC
+bash backends/qualcomm/bc/test_qnn_static_llama_bc.sh
+exit_code3=$?
+
 # Check the exit codes and print messages
 if [ $exit_code1 -ne 0 ]; then
     echo "Static Llama compile only with weight sharing test failed. $exit_code1."

@@ -50,8 +54,12 @@ if [ $exit_code2 -ne 0 ]; then
     echo "Static Llama accuracy test failed. $exit_code2."
 fi

+if [ $exit_code3 -ne 0 ]; then
+    echo "Static Llama BACKWARD COMPATIBILITY test failed. $exit_code3."
+fi
+
 # Return failure if either program failed
-if [ $exit_code1 -ne 0 ] || [ $exit_code2 -ne 0 ]; then
+if [ $exit_code1 -ne 0 ] || [ $exit_code2 -ne 0 ] || [ $exit_code3 -ne 0 ]; then
     exit 1
 else
     exit 0
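
The new backward-compatibility (BC) step is just another script whose exit code is folded into the final check. A small sketch of running that step on its own, assuming the build artifacts the BC script expects are already in place:

    # Run only the BC check and surface its status the same way the CI script does.
    bash backends/qualcomm/bc/test_qnn_static_llama_bc.sh
    exit_code3=$?
    if [ $exit_code3 -ne 0 ]; then
        echo "Static Llama BACKWARD COMPATIBILITY test failed. $exit_code3."
    fi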

.github/workflows/trunk.yml

Lines changed: 43 additions & 10 deletions
@@ -18,8 +18,8 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  test-models-macos:
-    name: test-models-macos
+  test-models-macos-cpu:
+    name: test-models-macos-cpu
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
     strategy:
       matrix:

@@ -568,10 +568,12 @@ jobs:
         PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh ${{ matrix.model }} "cmake" "qnn"

-  test-apple-model:
-    name: test-apple-model
+  test-models-macos-coreml:
+    name: test-models-macos-coreml
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
     strategy:
+      matrix:
+        model: [dl3, edsr, efficient_sam, emformer_join, emformer_transcribe, ic3, ic4, mobilebert, mv2, mv3, resnet50, vit, w2l]
       fail-fast: false
     with:
       runner: macos-m1-stable

@@ -580,7 +582,23 @@
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       timeout: 90
       script: |
+        MODEL_NAME=${{ matrix.model }}
         BUILD_TOOL=cmake
+        BACKEND="coreml-pybind"
+
+
+        # Set model specific overrides
+        if [[ "${MODEL_NAME}" == "mobilebert" ]]; then
+          # See https://github.com/pytorch/executorch/issues/12907
+          # mobilebert has nan output on FP16, and high MSE on fp32, so we disable runtime test now
+          BACKEND="coreml"
+        fi
+
+        if [[ "${MODEL_NAME}" == "efficient_sam" ]]; then
+          # See https://github.com/pytorch/executorch/issues/12906
+          # efficient_sam fails to run on CoreML
+          BACKEND="coreml"
+        fi

         bash .ci/scripts/setup-conda.sh

@@ -589,13 +607,28 @@
         PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh
         echo "Finishing installing coreml."

-        # Build and test coreml model
-        MODELS=(mv3 ic4 resnet50 edsr mobilebert w2l)
-        for MODEL_NAME in "${MODELS[@]}"; do
-          echo "::group::Exporting coreml model: $MODEL_NAME"
-          PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "coreml"
-          echo "::endgroup::"
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
+
+  test-models-macos-mps:
+    name: test-models-macos-mps
+    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+    strategy:
+      fail-fast: false
+    with:
+      runner: macos-m1-stable
+      python-version: '3.11'
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        BUILD_TOOL=cmake
+        bash .ci/scripts/setup-conda.sh
+
+        # Setup MacOS dependencies as there is no Docker support on MacOS atm
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"

+        # Build and test mps model
+        for MODEL_NAME in mv3 ic4 resnet50 edsr mobilebert w2l; do
           echo "::group::Exporting mps model: $MODEL_NAME"
           PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "mps"
           echo "::endgroup::"
CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -821,6 +821,10 @@ endif()
 if(EXECUTORCH_BUILD_VULKAN)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/vulkan)
 endif()
+if(EXECUTORCH_BUILD_VGF)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/arm)
+endif()
+

 if(EXECUTORCH_BUILD_ANDROID_JNI)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/android)
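
The new EXECUTORCH_BUILD_VGF option gates an add_subdirectory(backends/arm) at the top level. A hedged configure sketch; the build directory name and parallelism flag are assumptions, not part of this commit:

    # Hypothetical configure/build invocation with the VGF backend enabled.
    cmake -B cmake-out -DEXECUTORCH_BUILD_VGF=ON .
    cmake --build cmake-out -j"$(nproc)"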

backends/apple/coreml/scripts/generate_test_models.sh

Lines changed: 2 additions & 2 deletions
(The changed echo lines below differ only in whitespace, which is not visible here.)

@@ -22,7 +22,7 @@ cd "$EXECUTORCH_ROOT_PATH"
 MODELS=("add" "add_mul" "mul" "mv3")
 for MODEL in "${MODELS[@]}"
 do
-  echo "Executorch: Generating $MODEL model"
+  echo "Executorch: Generating $MODEL model"
   # TODO: Don't use the script in examples directory.
   python3 -m examples.apple.coreml.scripts.export --model_name "$MODEL" --save_processed_bytes
   mv -f "$MODEL""_coreml_all.pte" "$COREML_DIR_PATH/runtime/test/models"

@@ -36,7 +36,7 @@ COMPILE_MODELS=("add_mul")
 echo "Executorch: Generating compiled model"
 for MODEL in "${COMPILE_MODELS[@]}"
 do
-  echo "Executorch: Generating compiled $MODEL model"
+  echo "Executorch: Generating compiled $MODEL model"
   python3 -m examples.apple.coreml.scripts.export --model_name "$MODEL" --compile
   mv -f "$MODEL""_compiled_coreml_all.pte" "$COREML_DIR_PATH/runtime/test/models"
 done

backends/arm/CMakeLists.txt

Lines changed: 50 additions & 2 deletions
@@ -12,13 +12,17 @@ if(NOT EXECUTORCH_ROOT)
   set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
 endif()

-add_compile_options("-Wall" "-Werror")
-
 include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)

 set(_common_include_directories ${EXECUTORCH_ROOT}/.. ${EXECUTORCH_ROOT}/runtime/core/portable_type/c10)
 add_compile_definitions(C10_USING_CUSTOM_GENERATED_MACROS)

+
+# bare metal backend builds
+if(EXECUTORCH_BUILD_ARM_BAREMETAL)
+
+add_compile_options("-Wall" "-Werror")
+
 # Third-party folder and Ethos-U driver inclued
 set(THIRD_PARTY_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/third-party")
 set(DRIVER_ETHOSU_INCLUDE_DIR "${THIRD_PARTY_ROOT}/ethos-u-core-driver/include")

@@ -36,3 +40,47 @@ target_include_directories(
 target_include_directories(
   executorch_delegate_ethos_u PUBLIC ${DRIVER_ETHOSU_INCLUDE_DIR}
 )
+
+# end config for bare metal builds
+endif()
+
+
+# VGF backend builds
+if(EXECUTORCH_BUILD_VGF)
+
+# include libvgf
+set(LIBVGF_PATH "${EXECUTORCH_ROOT}/examples/arm/ethos-u-scratch/ml-sdk-for-vulkan-manifest/sw/vgf-lib/")
+
+set(VULKAN_THIRD_PARTY_PATH ${EXECUTORCH_ROOT}/backends/vulkan/third-party)
+set(VULKAN_HEADERS_PATH ${VULKAN_THIRD_PARTY_PATH}/Vulkan-Headers/include)
+set(VOLK_HEADERS_PATH ${VULKAN_THIRD_PARTY_PATH}/volk)
+
+set(LIBVGF_STATIC "${LIBVGF_PATH}/build/src/libvgf.a")
+set(LIBVGF_INCLUDE "${LIBVGF_PATH}/include/")
+
+add_library(vgf STATIC IMPORTED)
+set_property( TARGET vgf PROPERTY IMPORTED_LOCATION "${LIBVGF_STATIC}" )
+target_include_directories(vgf INTERFACE "${LIBVGF_INCLUDE}")
+
+# Add backend delegate for VGF
+set(_vgf_backend_sources backends/arm/runtime/VGFBackend.cpp
+    backends/arm/runtime/VGFSetup.cpp )
+
+# vgf backend
+list(TRANSFORM _vgf_backend_sources PREPEND "${EXECUTORCH_ROOT}/")
+add_library(vgf_backend ${_vgf_backend_sources})
+target_include_directories(
+  vgf_backend PUBLIC
+  ${_common_include_directories}
+  ${VULKAN_HEADERS_PATH}
+  ${VOLK_HEADERS_PATH}
+)
+target_compile_options(vgf_backend PRIVATE -DUSE_VULKAN_WRAPPER -DUSE_VULKAN_VOLK)
+
+
+target_link_libraries(vgf_backend PRIVATE executorch_core)
+target_link_libraries(vgf_backend PRIVATE vgf)
+executorch_target_link_options_shared_lib(vgf_backend)
+
+# end config for VGF builds
+endif()
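
The imported vgf target points at a prebuilt static library under LIBVGF_PATH. A small hedged pre-flight check from the repository root; the path is copied from the diff above, and how the VGF SDK itself is built is outside this commit:

    # Check that libvgf.a is where backends/arm/CMakeLists.txt expects it.
    LIBVGF="examples/arm/ethos-u-scratch/ml-sdk-for-vulkan-manifest/sw/vgf-lib/build/src/libvgf.a"
    if [ -f "${LIBVGF}" ]; then
      echo "libvgf.a found: ${LIBVGF}"
    else
      echo "libvgf.a missing; build the ML SDK for Vulkan vgf-lib first" >&2
    fi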

backends/arm/_passes/match_arg_ranks_pass.py

Lines changed: 3 additions & 0 deletions
@@ -54,6 +54,9 @@ def __init__(self, exported_program):
            exir_ops.edge.aten.le.Tensor,
            exir_ops.edge.aten.pow.Tensor_Tensor,
            exir_ops.edge.aten.where.self,
+           exir_ops.edge.aten.bitwise_and.Tensor,
+           exir_ops.edge.aten.bitwise_xor.Tensor,
+           exir_ops.edge.aten.bitwise_or.Tensor,
        ]

    def _match_op_rank(self, graph_module, node, arg, max_rank):

backends/arm/_passes/replace_scalar_with_tensor_pass.py

Lines changed: 6 additions & 0 deletions
@@ -34,6 +34,9 @@
     exir_ops.edge.aten.lt.Scalar: exir_ops.edge.aten.lt.Tensor,
     exir_ops.edge.aten.le.Scalar: exir_ops.edge.aten.le.Tensor,
     exir_ops.edge.aten.ne.Scalar: exir_ops.edge.aten.ne.Tensor,
+    exir_ops.edge.aten.bitwise_and.Scalar: exir_ops.edge.aten.bitwise_and.Tensor,
+    exir_ops.edge.aten.bitwise_or.Scalar: exir_ops.edge.aten.bitwise_or.Tensor,
+    exir_ops.edge.aten.bitwise_xor.Scalar: exir_ops.edge.aten.bitwise_xor.Tensor,
     torch.ops.aten.add.Scalar: torch.ops.aten.add.Tensor,
     torch.ops.aten.sub.Scalar: torch.ops.aten.sub.Tensor,
     torch.ops.aten.mul.Scalar: torch.ops.aten.mul.Tensor,

@@ -46,6 +49,9 @@
     torch.ops.aten.lt.Scalar: torch.ops.aten.lt.Tensor,
     torch.ops.aten.le.Scalar: torch.ops.aten.le.Tensor,
     torch.ops.aten.ne.Scalar: torch.ops.aten.ne.Tensor,
+    torch.ops.aten.bitwise_and.Scalar: torch.ops.aten.bitwise_and.Tensor,
+    torch.ops.aten.bitwise_or.Scalar: torch.ops.aten.bitwise_or.Tensor,
+    torch.ops.aten.bitwise_xor.Scalar: torch.ops.aten.bitwise_xor.Tensor,
 }

backends/arm/operator_support/ethos_u55_support.py

Lines changed: 3 additions & 0 deletions
@@ -125,6 +125,9 @@ class EthosU55NotSupported(OperatorSupportBase):
         exir_ops.edge.aten.bitwise_and.Tensor,
         exir_ops.edge.aten.bitwise_or.Tensor,
         exir_ops.edge.aten.bitwise_xor.Tensor,
+        exir_ops.edge.aten.bitwise_and.Scalar,
+        exir_ops.edge.aten.bitwise_or.Scalar,
+        exir_ops.edge.aten.bitwise_xor.Scalar,
         exir_ops.edge.aten.bitwise_not,
         exir_ops.edge.aten.logical_and.default,
         exir_ops.edge.aten.logical_or.default,

backends/arm/operator_support/tosa_supported_operators.py

Lines changed: 3 additions & 0 deletions
@@ -164,6 +164,9 @@ def is_node_supported(
             exir_ops.edge.aten.bitwise_and.Tensor,
             exir_ops.edge.aten.bitwise_or.Tensor,
             exir_ops.edge.aten.bitwise_xor.Tensor,
+            exir_ops.edge.aten.bitwise_and.Scalar,
+            exir_ops.edge.aten.bitwise_or.Scalar,
+            exir_ops.edge.aten.bitwise_xor.Scalar,
             exir_ops.edge.aten.expand_copy.default,
             exir_ops.edge.aten.cat.default,
             exir_ops.edge.aten.ceil.default,
