
Commit fcd48c2

Merge branch 'main' into qnn_eval_ci
2 parents: b8a930d + ccd9824


680 files changed (+18021, -9961 lines)


.ci/docker/build.sh

Lines changed: 2 additions & 2 deletions
@@ -54,13 +54,13 @@ case "${IMAGE_NAME}" in
   executorch-ubuntu-22.04-mediatek-sdk)
     MEDIATEK_SDK=yes
     CLANG_VERSION=12
-    ANDROID_NDK_VERSION=r27b
+    ANDROID_NDK_VERSION=r28c
     ;;
   executorch-ubuntu-22.04-clang12-android)
     LINTRUNNER=""
     CLANG_VERSION=12
     # From https://developer.android.com/ndk/downloads
-    ANDROID_NDK_VERSION=r27b
+    ANDROID_NDK_VERSION=r28c
     ;;
   *)
     echo "Invalid image name ${IMAGE_NAME}"
Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-40b02a2dc61bbf901a2df91719f47c98d65368ec
+828ae02053a6e0e20a2dfd6e737ba10c6f4dee6b

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-4d4abec80f03cd8fdefe1d9cb3a60d3690cd777e
+53a2908a10f414a2f85caa06703a26a40e873869

.ci/scripts/setup-samsung-linux-deps.sh

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@ set -ex

 download_ai_lite_core() {
   API_BASE="https://soc-developer.semiconductor.samsung.com/api/v1/resource/ai-litecore/download"
-  API_KEY="kn10SoSY3hkC-9Qny5TqD2mnqVrlupv3krnjLeBt5cY"
+  API_KEY=$SAMSUNG_AI_LITECORE_KEY

   VERSION="0.5"
   OS_NAME="Ubuntu 22.04"
@@ -52,7 +52,7 @@ download_ai_lite_core() {
 install_enn_backend() {
   NDK_INSTALLATION_DIR=/opt/ndk
   rm -rf "${NDK_INSTALLATION_DIR}" && sudo mkdir -p "${NDK_INSTALLATION_DIR}"
-  ANDROID_NDK_VERSION=r27b
+  ANDROID_NDK_VERSION=r28c

   # build Exynos backend
   export ANDROID_NDK_ROOT=${ANDROID_NDK_ROOT:-/opt/ndk}

.ci/scripts/test-cuda-build.sh

Lines changed: 95 additions & 0 deletions
@@ -0,0 +1,95 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -exu
+
+CUDA_VERSION=${1:-"12.6"}
+
+echo "=== Testing ExecuTorch CUDA ${CUDA_VERSION} Build ==="
+
+# Function to build and test ExecuTorch with CUDA support
+test_executorch_cuda_build() {
+  local cuda_version=$1
+
+  echo "Building ExecuTorch with CUDA ${cuda_version} support..."
+  echo "ExecuTorch will automatically detect CUDA and install appropriate PyTorch wheel"
+
+  # Check available resources before starting
+  echo "=== System Information ==="
+  echo "Available memory: $(free -h | grep Mem | awk '{print $2}')"
+  echo "Available disk space: $(df -h . | tail -1 | awk '{print $4}')"
+  echo "CPU cores: $(nproc)"
+  echo "CUDA version check:"
+  nvcc --version || echo "nvcc not found"
+  nvidia-smi || echo "nvidia-smi not found"
+
+  # Set CMAKE_ARGS to enable CUDA build - ExecuTorch will handle PyTorch installation automatically
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
+
+  echo "=== Starting ExecuTorch Installation ==="
+  # Install ExecuTorch with CUDA support with timeout and error handling
+  timeout 5400 ./install_executorch.sh || {
+    local exit_code=$?
+    echo "ERROR: install_executorch.sh failed with exit code: $exit_code"
+    if [ $exit_code -eq 124 ]; then
+      echo "ERROR: Installation timed out after 90 minutes"
+    fi
+    exit $exit_code
+  }
+
+  echo "SUCCESS: ExecuTorch CUDA build completed"
+
+  # Verify the installation
+  echo "=== Verifying ExecuTorch CUDA Installation ==="
+
+  # Test that ExecuTorch was built successfully
+  python -c "
+import executorch
+print('SUCCESS: ExecuTorch imported successfully')
+"
+
+  # Test CUDA availability and show details
+  python -c "
+try:
+    import torch
+    print('INFO: PyTorch version:', torch.__version__)
+    print('INFO: CUDA available:', torch.cuda.is_available())
+
+    if torch.cuda.is_available():
+        print('SUCCESS: CUDA is available for ExecuTorch')
+        print('INFO: CUDA version:', torch.version.cuda)
+        print('INFO: GPU device count:', torch.cuda.device_count())
+        print('INFO: Current GPU device:', torch.cuda.current_device())
+        print('INFO: GPU device name:', torch.cuda.get_device_name())
+
+        # Test basic CUDA tensor operation
+        device = torch.device('cuda')
+        x = torch.randn(10, 10).to(device)
+        y = torch.randn(10, 10).to(device)
+        z = torch.mm(x, y)
+        print('SUCCESS: CUDA tensor operation completed on device:', z.device)
+        print('INFO: Result tensor shape:', z.shape)
+
+        print('SUCCESS: ExecuTorch CUDA integration verified')
+    else:
+        print('WARNING: CUDA not detected, but ExecuTorch built successfully')
+        exit(1)
+except Exception as e:
+    print('ERROR: ExecuTorch CUDA test failed:', e)
+    exit(1)
+"
+
+  echo "SUCCESS: ExecuTorch CUDA ${cuda_version} build and verification completed successfully"
+}
+
+# Main execution
+echo "Current working directory: $(pwd)"
+echo "Directory contents:"
+ls -la
+
+# Run the CUDA build test
+test_executorch_cuda_build "${CUDA_VERSION}"

.ci/scripts/test_backend_linux.sh

Lines changed: 6 additions & 1 deletion
@@ -39,12 +39,17 @@ if [[ "$FLOW" == *qnn* ]]; then
 fi

 if [[ "$FLOW" == *vulkan* ]]; then
-    # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
+    # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate.
     source .ci/scripts/setup-vulkan-linux-deps.sh

     EXTRA_BUILD_ARGS+=" -DEXECUTORCH_BUILD_VULKAN=ON"
 fi

+if [[ "$FLOW" == *arm* ]]; then
+    # Setup ARM deps.
+    .ci/scripts/setup-arm-baremetal-tools.sh
+fi
+
 # We need the runner to test the built library.
 PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true

.ci/scripts/test_llava.sh

Lines changed: 1 addition & 1 deletion
@@ -149,7 +149,7 @@ run_and_verify() {

     # verify result.txt
     RESULT=$(cat result.txt)
-    EXPECTED_PREFIX="ASSISTANT: image captures a basketball game in progress, with"
+    EXPECTED_PREFIX="ASSISTANT: The image captures a basketball game in progress, with"

     if [[ "${RESULT}" == *"${EXPECTED_PREFIX}"* ]]; then
         echo "Expected result prefix: ${EXPECTED_PREFIX}"

.ci/scripts/test_model.sh

Lines changed: 3 additions & 3 deletions
@@ -131,13 +131,13 @@ test_model_with_xnnpack() {
         return 0
     fi

-    # Delegation
+    # Delegation and test with pybindings
     if [[ ${WITH_QUANTIZATION} == true ]]; then
         SUFFIX="q8"
-        "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize
+        "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize --test_after_export
     else
         SUFFIX="fp32"
-        "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate
+        "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --test_after_export
     fi

     OUTPUT_MODEL_PATH="${MODEL_NAME}_xnnpack_${SUFFIX}.pte"

.ci/scripts/test_wheel_package_qnn.sh

Lines changed: 1 addition & 0 deletions
@@ -145,6 +145,7 @@ run_core_tests () {
   echo "=== [$LABEL] Import smoke tests ==="
   "$PYBIN" -c "import executorch; print('executorch imported successfully')"
   "$PYBIN" -c "import executorch.backends.qualcomm; print('executorch.backends.qualcomm imported successfully')"
+  "$PYBIN" -c "from executorch.export.target_recipes import get_android_recipe; recipe = get_android_recipe('android-arm64-snapdragon-fp16'); print(f'executorch.export.target_recipes imported successfully: {recipe}')"

   echo "=== [$LABEL] List installed executorch/backends/qualcomm/python ==="
   local SITE_DIR
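
For context, the added smoke test exercises the target-recipe helper shipped in the wheel. Below is a minimal standalone sketch of the same check in plain Python rather than through "$PYBIN" -c; only get_android_recipe and the recipe name are taken from this diff, and the closing comment about where the recipe is consumed is an assumption, not something this commit shows.

# Sketch: resolve an Android/Snapdragon export recipe from an installed wheel.
from executorch.export.target_recipes import get_android_recipe

# Recipe name used by the CI smoke test above.
recipe = get_android_recipe("android-arm64-snapdragon-fp16")
print(f"Resolved target recipe: {recipe}")

# Assumption: the recipe object would then be handed to ExecuTorch's export
# pipeline; the exact entry point is not shown in this commit.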

.ci/scripts/wheel/test_base.py

Lines changed: 12 additions & 0 deletions
@@ -41,6 +41,18 @@ class ModelTest:


 def run_tests(model_tests: List[ModelTest]) -> None:
+    # Test that we can import the portable_lib module - verifies RPATH is correct
+    print("Testing portable_lib import...")
+    try:
+        from executorch.extension.pybindings._portable_lib import (  # noqa: F401
+            _load_for_executorch,
+        )
+
+        print("✓ Successfully imported _load_for_executorch from portable_lib")
+    except ImportError as e:
+        print(f"✗ Failed to import portable_lib: {e}")
+        raise
+
     # Why are we doing this envvar shenanigans? Since we build the testers, which
     # uses buck, we cannot run as root. This is a sneaky of getting around that
     # test.
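
The added check only verifies that _load_for_executorch can be imported (i.e. that the wheel's RPATH is set up correctly). A hedged sketch of how that symbol is typically used once the import succeeds follows; the model path is hypothetical and the forward() call pattern is an assumption about the pybindings, not something this commit adds.

# Sketch: load a .pte program and run it via the portable_lib pybindings.
import torch
from executorch.extension.pybindings._portable_lib import _load_for_executorch

MODEL_PATH = "model.pte"  # hypothetical path; not part of this commit
module = _load_for_executorch(MODEL_PATH)
# Assumption: forward() accepts a sequence of input tensors and returns a list of outputs.
outputs = module.forward((torch.randn(1, 3, 224, 224),))
print(outputs[0].shape)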
