Commit 4404db8

Update
[ghstack-poisoned]

2 parents 28605ca + 57d2e24

150 files changed, +6869 / -4176 lines changed

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-6fc0ad22f0a07b6f38d138861c56a765d5a9bb02
+e7152ff8a6a929a0db7f3f4a72a5b6d471769cd3

.ci/scripts/test_backend_linux.sh

Lines changed: 34 additions & 4 deletions
@@ -10,6 +10,8 @@ SUITE=$1
 FLOW=$2
 ARTIFACT_DIR=$3

+REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv"
+
 echo "Running backend test job for suite $SUITE, flow $FLOW."
 echo "Saving job artifacts to $ARTIFACT_DIR."

@@ -18,10 +20,38 @@ eval "$(conda shell.bash hook)"
 CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
 conda activate "${CONDA_ENV}"

-# Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
-#source .ci/scripts/setup-vulkan-linux-deps.sh
+export PYTHON_EXECUTABLE=python
+
+# CMake options to use, in addition to the defaults.
+EXTRA_BUILD_ARGS=""
+
+if [[ "$FLOW" == *qnn* ]]; then
+  # Setup QNN sdk and deps - note that this is a bit hacky due to the nature of the
+  # Qualcomm build. TODO (gjcomer) Clean this up once the QNN pybinding integration is
+  # cleaned up.
+  PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake
+  PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
+  PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
+  QNN_X86_LIB_DIR=`realpath build-x86/lib/`
+  QNN_SDK_ROOT="/tmp/qnn/2.28.0.241029"
+  export LD_LIBRARY_PATH="$QNN_X86_LIB_DIR:$QNN_SDK_ROOT/lib/x86_64-linux-clang/:${LD_LIBRARY_PATH:-}"
+
+  # TODO Get SDK root from install scripts
+  EXTRA_BUILD_ARGS+=" -DEXECUTORCH_BUILD_QNN=ON -DQNN_SDK_ROOT=$QNN_SDK_ROOT"
+fi
+
+if [[ "$FLOW" == *vulkan* ]]; then
+  # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
+  source .ci/scripts/setup-vulkan-linux-deps.sh
+
+  EXTRA_BUILD_ARGS+=" -DEXECUTORCH_BUILD_VULKAN=ON"
+fi

 # We need the runner to test the built library.
-PYTHON_EXECUTABLE=python .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release
+PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true
+
+EXIT_CODE=0
+python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?

-python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$ARTIFACT_DIR/test_results.csv"
+# Generate markdown summary.
+python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE
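For context, this script is what the nightly workflow further below sources for each matrix entry. A minimal local invocation might look roughly like the sketch that follows; the suite and flow names are taken from the nightly matrix, the artifact directory is an arbitrary writable path, and a checkout with the CI Python environment already set up is assumed:

# Hypothetical local run of the Linux backend test job.
mkdir -p /tmp/backend-test-artifacts
bash .ci/scripts/test_backend_linux.sh operators xnnpack /tmp/backend-test-artifacts
# Expected outputs: /tmp/backend-test-artifacts/test-report-xnnpack-operators.csv and,
# when GITHUB_STEP_SUMMARY is unset, a step_summary.md in the working directory.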

.ci/scripts/test_backend_macos.sh

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+SUITE=$1
+FLOW=$2
+ARTIFACT_DIR=$3
+
+REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv"
+
+echo "Running backend test job for suite $SUITE, flow $FLOW."
+echo "Saving job artifacts to $ARTIFACT_DIR."
+
+${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
+
+bash .ci/scripts/setup-conda.sh
+eval "$(conda shell.bash hook)"
+
+PYTHON_EXECUTABLE=python
+${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release
+
+EXIT_CODE=0
+${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?
+
+# Generate markdown summary.
+${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE
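Both new scripts rely on the same shell idiom: under set -e, appending || EXIT_CODE=$? to the test runner records a failure instead of aborting the script, so the markdown summary is always generated and the captured exit code can be surfaced afterwards via --exit-code. A minimal sketch of the idiom, with placeholder commands standing in for the real runner and summary steps:

#!/usr/bin/env bash
set -eu
EXIT_CODE=0
false || EXIT_CODE=$?        # failure is recorded instead of terminating the script
echo "generating summary..." # still runs after the failed command
exit "$EXIT_CODE"            # re-raise the original failure at the end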

.ci/scripts/test_huggingface_optimum_model.py

Lines changed: 1 addition & 1 deletion
@@ -369,7 +369,7 @@ def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
     ),  # fails to lower for CoreML
     "smollm2-135m": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),
     "smollm3-3b": ("HuggingFaceTB/SmolLM3-3B", test_text_generation),
-    "olmo": ("allenai/OLMo-1B-hf", test_text_generation),
+    "olmo-1b": ("allenai/OLMo-1B-hf", test_text_generation),
 }

 _mask_fill_mapping = {
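Since this is only a rename of the mapping key, anything that selects the model by key (for example, a CI filter passing "olmo") would presumably need to pass "olmo-1b" after this change; the underlying allenai/OLMo-1B-hf checkpoint and its test_text_generation entry are unchanged.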

.ci/scripts/unittest-buck2.sh

Lines changed: 4 additions & 3 deletions
@@ -11,9 +11,10 @@ set -eux
 # TODO: can't query //kernels/prim_ops because of non-buckified stuff in OSS.
 buck2 query "//backends/apple/... + //backends/example/... + \
 //backends/mediatek/... + //backends/transforms/... + \
-//backends/xnnpack/... + //configurations/... + //kernels/aten/... + \
-//kernels/optimized/... + //kernels/portable/... + //kernels/quantized/... + \
-//kernels/test/... + //runtime/... + //schema/... + //test/... + //util/..."
+//backends/xnnpack/... + //configurations/... + //extension/flat_tensor: + \
+//extension/llm/runner: + //kernels/aten/... + //kernels/optimized/... + \
+//kernels/portable/... + //kernels/quantized/... + //kernels/test/... + \
+//runtime/... + //schema/... + //test/... + //util/..."

 # TODO: optimized ops are unbuildable because they now use ATen; put
 # them back after we can use PyTorch in OSS buck.
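Note the two target-pattern forms now mixed in the query: //path/... matches every target in a package and all packages beneath it, while //path: matches only the targets declared in that one package. A rough illustration using patterns already present in the script:

# Recursive pattern: all targets under //kernels/portable and its subdirectories.
buck2 query "//kernels/portable/..."
# Package-only pattern: just the targets defined in //extension/llm/runner's BUCK file.
buck2 query "//extension/llm/runner:"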

.github/workflows/add-unanswered-to-project.yml

Lines changed: 3 additions & 3 deletions
@@ -1,10 +1,10 @@
 name: Add Open External Contributor PRs and Issues to PyTorch Org Project 136

 on:
-  # schedule:
-  #   - cron: '0 * * * *'
   workflow_dispatch:
-
+  pull_request:
+    paths:
+      - .github/workflows/add-unanswered-to-project.yml
 jobs:
   add_to_project:
     runs-on: ubuntu-latest
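With this change the workflow no longer runs on an hourly cron; it remains manually triggerable via workflow_dispatch, and the pull_request trigger with a paths filter means it also runs automatically only on pull requests that modify this workflow file itself.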

.github/workflows/nightly.yml

Lines changed: 32 additions & 6 deletions
@@ -42,7 +42,11 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        flow: [xnnpack, xnnpack_static_int8_per_channel]
+        flow: [
+          qnn, qnn_16a16w, qnn_16a8w, qnn_16a4w, qnn_16a4w_block, qnn_8a8w,
+          vulkan, vulkan_static_int8_per_channel,
+          xnnpack, xnnpack_dynamic_int8_per_channel, xnnpack_static_int8_per_channel, xnnpack_static_int8_per_tensor
+        ]
         suite: [models, operators]
     with:
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -53,8 +57,30 @@ jobs:
       upload-artifact: test-report-${{ matrix.flow }}-${{ matrix.suite }}
       script: |
         set -eux
-        # Intentionally suppressing exit code for now.
-        # TODO (gjcomer) Remove this when jobs are stable.
-        EXIT_CODE=0
-        .ci/scripts/test_backend_linux.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}" || EXIT_CODE=$?
-        echo "Test run complete with exit code $EXIT_CODE."
+
+        source .ci/scripts/test_backend_linux.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}"
+
+  backend-test-macos:
+    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        flow: [coreml, coreml_static_int8]
+        suite: [models, operators]
+    with:
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      runner: macos-m1-stable
+      python-version: 3.12
+      submodules: recursive
+      timeout: 120
+      upload-artifact: test-report-${{ matrix.flow }}-${{ matrix.suite }}
+      script: |
+        set -eux
+
+        # This is needed to get the prebuilt PyTorch wheel from S3
+        ${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
+
+        source .ci/scripts/test_backend_macos.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}"
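As a rough count, the Linux matrix now expands to 12 flows x 2 suites = 24 jobs per nightly run (up from 2 x 2 = 4), and the new macOS job adds 2 flows x 2 suites = 4 more.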

.github/workflows/trunk.yml

Lines changed: 104 additions & 104 deletions
@@ -55,102 +55,102 @@ jobs:
         # Build and test executorch
         PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"

-  test-models-arm-zephyr:
-    name: test-models-arm-zephyr
-    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
-    strategy:
-      matrix:
-        model: [add, softmax, mv2]
-      fail-fast: false
-    with:
-      runner: linux.2xlarge
-      docker-image: ci-image:executorch-ubuntu-22.04-zephyr-sdk
-      submodules: 'recursive'
-      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 120
-      script: |
-        MODEL_NAME=${{ matrix.model }}
-        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-        conda activate "${CONDA_ENV}"
-        if [[ ${{ matrix.model}} == "add" ]]; then
-          SIM_LIMIT_SEC=60
-        elif [[ ${{ matrix.model}} == "softmax" ]]; then
-          SIM_LIMIT_SEC=60
-        elif [[ ${{ matrix.model}} == "mv2" ]]; then
-          SIM_LIMIT_SEC=5000
-        else
-          echo "Failed unsupported model selection ${{ matrix.model }}"
-          exit 1
-        fi
-
-        source .ci/scripts/utils.sh
-        source .ci/scripts/zephyr-utils.sh
-        mkdir -p zephyr_scratch/
-        cd zephyr_scratch
-        export ZEPHYR_PROJ_ROOT=$(realpath $(pwd))
-        export ARM_FVP_TUTORIALS_ROOT=$ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm-fvp-tutorials
-
-        # TODO @Bujji: Should see if this can be moved into the docker image itself
-        download_arm_zephyr_sdk
-        ./zephyr-sdk-0.17.2/setup.sh -c -t arm-zephyr-eabi
-        cd $ZEPHYR_PROJ_ROOT
-        setup_zephyr_et_module
-
-        # Run setup scripts for Arm FVP and Arm AOT Compilation
-        cd $ZEPHYR_PROJ_ROOT/modules/lib/executorch
-        install_executorch "--use-pt-pinned-commit"
-        .ci/scripts/setup-arm-baremetal-tools.sh --target-toolchain zephyr
-        source examples/arm/ethos-u-scratch/setup_path.sh
-        source $ZEPHYR_PROJ_ROOT/zephyr/zephyr-env.sh
-
-        # Get the model as PTE
-        python -m examples.arm.aot_arm_compiler \
-          --model_name="${MODEL_NAME}" \
-          --output="${MODEL_NAME}.pte"
-
-        # Generate the C-style header
-        cd $ARM_FVP_TUTORIALS_ROOT
-        python build_model.py \
-          --executorch-root $ZEPHYR_PROJ_ROOT/modules/lib/executorch \
-          --pte-file $ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte \
-          --output-path $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/src/
-
-        cd $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/
-
-        # Build the zephyr elf
-        west build -p always -b mps3/corstone300/fvp -- \
-          -DET_PTE_FILE_PATH_FOR_SELECTIVE_BUILD=$ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte
-
-        # Run the simulation
-        FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf \
-          -C mps3_board.visualisation.disable-visualisation=1 \
-          -C mps3_board.telnetterminal0.start_telnet=0 \
-          -C mps3_board.uart0.out_file='sim.out' \
-          -C cpu0.CFGITCMSZ=15 \
-          -C cpu0.CFGDTCMSZ=15 \
-          --simlimit ${SIM_LIMIT_SEC}
-
-        # Disable exit on error
-        set +e
-        # Report failure if any of the ouptut verification checks fail
-        grep -qF "ERROR" sim.out
-        exit_status=$? #store 0 if found (failure), 1 if not (success)
-        if [[ "$exit_status" -eq "0" ]]; then
-          cat sim.out
-          set -e
-          exit 1
-        fi
-
-        # Report fail if simulation does not complete successfully
-        grep -qF "SUCCESS: Program complete, exiting." sim.out
-        exit_status=$? #store 0 if found (success), 1 if not (failure)
-        if [[ "$exit_status" -eq "1" ]]; then
-          cat sim.out
-          set -e
-          exit 1
-        fi
-        # Re-enable exit on error
-        set -e
+  # test-models-arm-zephyr:
+  #   name: test-models-arm-zephyr
+  #   uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
+  #   strategy:
+  #     matrix:
+  #       model: [add, softmax, mv2]
+  #     fail-fast: false
+  #   with:
+  #     runner: linux.2xlarge
+  #     docker-image: ci-image:executorch-ubuntu-22.04-zephyr-sdk
+  #     submodules: 'recursive'
+  #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+  #     timeout: 120
+  #     script: |
+  #       MODEL_NAME=${{ matrix.model }}
+  #       CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+  #       conda activate "${CONDA_ENV}"
+  #       if [[ ${{ matrix.model}} == "add" ]]; then
+  #         SIM_LIMIT_SEC=60
+  #       elif [[ ${{ matrix.model}} == "softmax" ]]; then
+  #         SIM_LIMIT_SEC=60
+  #       elif [[ ${{ matrix.model}} == "mv2" ]]; then
+  #         SIM_LIMIT_SEC=5000
+  #       else
+  #         echo "Failed unsupported model selection ${{ matrix.model }}"
+  #         exit 1
+  #       fi
+  #
+  #       source .ci/scripts/utils.sh
+  #       source .ci/scripts/zephyr-utils.sh
+  #       mkdir -p zephyr_scratch/
+  #       cd zephyr_scratch
+  #       export ZEPHYR_PROJ_ROOT=$(realpath $(pwd))
+  #       export ARM_FVP_TUTORIALS_ROOT=$ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm-fvp-tutorials
+  #
+  #       # TODO @Bujji: Should see if this can be moved into the docker image itself
+  #       download_arm_zephyr_sdk
+  #       ./zephyr-sdk-0.17.2/setup.sh -c -t arm-zephyr-eabi
+  #       cd $ZEPHYR_PROJ_ROOT
+  #       setup_zephyr_et_module
+  #
+  #       # Run setup scripts for Arm FVP and Arm AOT Compilation
+  #       cd $ZEPHYR_PROJ_ROOT/modules/lib/executorch
+  #       install_executorch
+  #       .ci/scripts/setup-arm-baremetal-tools.sh --target-toolchain zephyr
+  #       source examples/arm/ethos-u-scratch/setup_path.sh
+  #       source $ZEPHYR_PROJ_ROOT/zephyr/zephyr-env.sh
+  #
+  #       # Get the model as PTE
+  #       python -m examples.arm.aot_arm_compiler \
+  #         --model_name="${MODEL_NAME}" \
+  #         --output="${MODEL_NAME}.pte"
+  #
+  #       # Generate the C-style header
+  #       cd $ARM_FVP_TUTORIALS_ROOT
+  #       python build_model.py \
+  #         --executorch-root $ZEPHYR_PROJ_ROOT/modules/lib/executorch \
+  #         --pte-file $ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte \
+  #         --output-path $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/src/
+  #
+  #       cd $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/
+  #
+  #       # Build the zephyr elf
+  #       west build -p always -b mps3/corstone300/fvp -- \
+  #         -DET_PTE_FILE_PATH_FOR_SELECTIVE_BUILD=$ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte
+  #
+  #       # Run the simulation
+  #       FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf \
+  #         -C mps3_board.visualisation.disable-visualisation=1 \
+  #         -C mps3_board.telnetterminal0.start_telnet=0 \
+  #         -C mps3_board.uart0.out_file='sim.out' \
+  #         -C cpu0.CFGITCMSZ=15 \
+  #         -C cpu0.CFGDTCMSZ=15 \
+  #         --simlimit ${SIM_LIMIT_SEC}
+  #
+  #       # Disable exit on error
+  #       set +e
+  #       # Report failure if any of the ouptut verification checks fail
+  #       grep -qF "ERROR" sim.out
+  #       exit_status=$? #store 0 if found (failure), 1 if not (success)
+  #       if [[ "$exit_status" -eq "0" ]]; then
+  #         cat sim.out
+  #         set -e
+  #         exit 1
+  #       fi
+  #
+  #       # Report fail if simulation does not complete successfully
+  #       grep -qF "SUCCESS: Program complete, exiting." sim.out
+  #       exit_status=$? #store 0 if found (success), 1 if not (failure)
+  #       if [[ "$exit_status" -eq "1" ]]; then
+  #         cat sim.out
+  #         set -e
+  #         exit 1
+  #       fi
+  #       # Re-enable exit on error
+  #       set -e

   test-models-linux-aarch64:
     name: test-models-linux-aarch64
@@ -836,14 +836,14 @@ jobs:
     strategy:
       matrix:
         config: [
-          # XNNPack.
-          llama3.2-1b|xnnpack|--quantize,
-          qwen3-0.6b|xnnpack|--quantize,
-          qwen3-1.7b|xnnpack|--quantize,
-          gemma3-1b|xnnpack|--quantize,
-          phi4-mini|xnnpack|--quantize,
-          smollm2-135m|xnnpack|--quantize,
-          smollm3-3b|xnnpack|--quantize,
+          # # XNNPack. (Skipping for now due to intermittent segmentation faults, see https://github.com/huggingface/optimum-executorch/issues/122.)
+          # llama3.2-1b|xnnpack|--quantize,
+          # qwen3-0.6b|xnnpack|--quantize,
+          # qwen3-1.7b|xnnpack|--quantize,
+          # gemma3-1b|xnnpack|--quantize,
+          # phi4-mini|xnnpack|--quantize,
+          # smollm2-135m|xnnpack|--quantize,
+          # smollm3-3b|xnnpack|--quantize,
           # CoreML.
           llama3.2-1b|coreml_fp32_gpu|--quantize,
           qwen3-0.6b|coreml_fp32_gpu|--quantize,

0 commit comments

Comments
 (0)