
Commit 7e4be82

Update
[ghstack-poisoned]
2 parents bd7694b + ea0fff3 commit 7e4be82

File tree

99 files changed: +3937, -3324 lines changed

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-6fc0ad22f0a07b6f38d138861c56a765d5a9bb02
+e7152ff8a6a929a0db7f3f4a72a5b6d471769cd3

.ci/scripts/test_backend_linux.sh

Lines changed: 1 addition & 1 deletion

@@ -25,6 +25,6 @@ source .ci/scripts/setup-vulkan-linux-deps.sh
 EXTRA_BUILD_ARGS="-DEXECUTORCH_BUILD_VULKAN=ON"

 # We need the runner to test the built library.
-PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release
+PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true

 python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$ARTIFACT_DIR/test_results.csv"

.ci/scripts/test_huggingface_optimum_model.py

Lines changed: 1 addition & 1 deletion

@@ -369,7 +369,7 @@ def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
 ), # fails to lower for CoreML
 "smollm2-135m": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),
 "smollm3-3b": ("HuggingFaceTB/SmolLM3-3B", test_text_generation),
-"olmo": ("allenai/OLMo-1B-hf", test_text_generation),
+"olmo-1b": ("allenai/OLMo-1B-hf", test_text_generation),
 }

 _mask_fill_mapping = {
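For context, the renamed entry lives in a short-name-to-(checkpoint, test function) registry. The sketch below is illustrative only: the registry variable name, the lookup helper, and the test_text_generation signature are assumptions (modeled on the test_vit signature shown in the hunk header); only the "olmo-1b" key and the ("allenai/OLMo-1B-hf", test_text_generation) value come from the diff.

# Hypothetical sketch of how such a registry entry is typically consumed;
# names below are illustrative, not taken from the commit.
def test_text_generation(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    ...  # stub standing in for the real test routine

_text_generation_mapping = {
    "smollm2-135m": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),
    "smollm3-3b": ("HuggingFaceTB/SmolLM3-3B", test_text_generation),
    "olmo-1b": ("allenai/OLMo-1B-hf", test_text_generation),  # renamed from "olmo"
}

def run_model_test(name: str, model_dir: str, recipe: str, quantize: bool) -> None:
    # Look up the Hugging Face checkpoint and the test routine by short name,
    # then dispatch; the old "olmo" key would now raise KeyError.
    model_id, test_fn = _text_generation_mapping[name]
    test_fn(model_id, model_dir, recipe, quantize=quantize)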

.github/workflows/trunk.yml

Lines changed: 104 additions & 104 deletions

@@ -55,102 +55,102 @@ jobs:
 # Build and test executorch
 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"

-test-models-arm-zephyr:
-name: test-models-arm-zephyr
-uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
-strategy:
-matrix:
-model: [add, softmax, mv2]
-fail-fast: false
-with:
-runner: linux.2xlarge
-docker-image: ci-image:executorch-ubuntu-22.04-zephyr-sdk
-submodules: 'recursive'
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-timeout: 120
-script: |
-MODEL_NAME=${{ matrix.model }}
-CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-conda activate "${CONDA_ENV}"
-if [[ ${{ matrix.model}} == "add" ]]; then
-SIM_LIMIT_SEC=60
-elif [[ ${{ matrix.model}} == "softmax" ]]; then
-SIM_LIMIT_SEC=60
-elif [[ ${{ matrix.model}} == "mv2" ]]; then
-SIM_LIMIT_SEC=5000
-else
-echo "Failed unsupported model selection ${{ matrix.model }}"
-exit 1
-fi
-
-source .ci/scripts/utils.sh
-source .ci/scripts/zephyr-utils.sh
-mkdir -p zephyr_scratch/
-cd zephyr_scratch
-export ZEPHYR_PROJ_ROOT=$(realpath $(pwd))
-export ARM_FVP_TUTORIALS_ROOT=$ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm-fvp-tutorials
-
-# TODO @Bujji: Should see if this can be moved into the docker image itself
-download_arm_zephyr_sdk
-./zephyr-sdk-0.17.2/setup.sh -c -t arm-zephyr-eabi
-cd $ZEPHYR_PROJ_ROOT
-setup_zephyr_et_module
-
-# Run setup scripts for Arm FVP and Arm AOT Compilation
-cd $ZEPHYR_PROJ_ROOT/modules/lib/executorch
-install_executorch "--use-pt-pinned-commit"
-.ci/scripts/setup-arm-baremetal-tools.sh --target-toolchain zephyr
-source examples/arm/ethos-u-scratch/setup_path.sh
-source $ZEPHYR_PROJ_ROOT/zephyr/zephyr-env.sh
-
-# Get the model as PTE
-python -m examples.arm.aot_arm_compiler \
---model_name="${MODEL_NAME}" \
---output="${MODEL_NAME}.pte"
-
-# Generate the C-style header
-cd $ARM_FVP_TUTORIALS_ROOT
-python build_model.py \
---executorch-root $ZEPHYR_PROJ_ROOT/modules/lib/executorch \
---pte-file $ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte \
---output-path $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/src/
-
-cd $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/
-
-# Build the zephyr elf
-west build -p always -b mps3/corstone300/fvp -- \
--DET_PTE_FILE_PATH_FOR_SELECTIVE_BUILD=$ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte
-
-# Run the simulation
-FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf \
--C mps3_board.visualisation.disable-visualisation=1 \
--C mps3_board.telnetterminal0.start_telnet=0 \
--C mps3_board.uart0.out_file='sim.out' \
--C cpu0.CFGITCMSZ=15 \
--C cpu0.CFGDTCMSZ=15 \
---simlimit ${SIM_LIMIT_SEC}
-
-# Disable exit on error
-set +e
-# Report failure if any of the ouptut verification checks fail
-grep -qF "ERROR" sim.out
-exit_status=$? #store 0 if found (failure), 1 if not (success)
-if [[ "$exit_status" -eq "0" ]]; then
-cat sim.out
-set -e
-exit 1
-fi
-
-# Report fail if simulation does not complete successfully
-grep -qF "SUCCESS: Program complete, exiting." sim.out
-exit_status=$? #store 0 if found (success), 1 if not (failure)
-if [[ "$exit_status" -eq "1" ]]; then
-cat sim.out
-set -e
-exit 1
-fi
-# Re-enable exit on error
-set -e
+# test-models-arm-zephyr:
+# name: test-models-arm-zephyr
+# uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
+# strategy:
+# matrix:
+# model: [add, softmax, mv2]
+# fail-fast: false
+# with:
+# runner: linux.2xlarge
+# docker-image: ci-image:executorch-ubuntu-22.04-zephyr-sdk
+# submodules: 'recursive'
+# ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+# timeout: 120
+# script: |
+# MODEL_NAME=${{ matrix.model }}
+# CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+# conda activate "${CONDA_ENV}"
+# if [[ ${{ matrix.model}} == "add" ]]; then
+# SIM_LIMIT_SEC=60
+# elif [[ ${{ matrix.model}} == "softmax" ]]; then
+# SIM_LIMIT_SEC=60
+# elif [[ ${{ matrix.model}} == "mv2" ]]; then
+# SIM_LIMIT_SEC=5000
+# else
+# echo "Failed unsupported model selection ${{ matrix.model }}"
+# exit 1
+# fi
+#
+# source .ci/scripts/utils.sh
+# source .ci/scripts/zephyr-utils.sh
+# mkdir -p zephyr_scratch/
+# cd zephyr_scratch
+# export ZEPHYR_PROJ_ROOT=$(realpath $(pwd))
+# export ARM_FVP_TUTORIALS_ROOT=$ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm-fvp-tutorials
+#
+# # TODO @Bujji: Should see if this can be moved into the docker image itself
+# download_arm_zephyr_sdk
+# ./zephyr-sdk-0.17.2/setup.sh -c -t arm-zephyr-eabi
+# cd $ZEPHYR_PROJ_ROOT
+# setup_zephyr_et_module
+#
+# # Run setup scripts for Arm FVP and Arm AOT Compilation
+# cd $ZEPHYR_PROJ_ROOT/modules/lib/executorch
+# install_executorch
+# .ci/scripts/setup-arm-baremetal-tools.sh --target-toolchain zephyr
+# source examples/arm/ethos-u-scratch/setup_path.sh
+# source $ZEPHYR_PROJ_ROOT/zephyr/zephyr-env.sh
+#
+# # Get the model as PTE
+# python -m examples.arm.aot_arm_compiler \
+# --model_name="${MODEL_NAME}" \
+# --output="${MODEL_NAME}.pte"
+#
+# # Generate the C-style header
+# cd $ARM_FVP_TUTORIALS_ROOT
+# python build_model.py \
+# --executorch-root $ZEPHYR_PROJ_ROOT/modules/lib/executorch \
+# --pte-file $ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte \
+# --output-path $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/src/
+#
+# cd $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/
+#
+# # Build the zephyr elf
+# west build -p always -b mps3/corstone300/fvp -- \
+# -DET_PTE_FILE_PATH_FOR_SELECTIVE_BUILD=$ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte
+#
+# # Run the simulation
+# FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf \
+# -C mps3_board.visualisation.disable-visualisation=1 \
+# -C mps3_board.telnetterminal0.start_telnet=0 \
+# -C mps3_board.uart0.out_file='sim.out' \
+# -C cpu0.CFGITCMSZ=15 \
+# -C cpu0.CFGDTCMSZ=15 \
+# --simlimit ${SIM_LIMIT_SEC}
+#
+# # Disable exit on error
+# set +e
+# # Report failure if any of the ouptut verification checks fail
+# grep -qF "ERROR" sim.out
+# exit_status=$? #store 0 if found (failure), 1 if not (success)
+# if [[ "$exit_status" -eq "0" ]]; then
+# cat sim.out
+# set -e
+# exit 1
+# fi
+#
+# # Report fail if simulation does not complete successfully
+# grep -qF "SUCCESS: Program complete, exiting." sim.out
+# exit_status=$? #store 0 if found (success), 1 if not (failure)
+# if [[ "$exit_status" -eq "1" ]]; then
+# cat sim.out
+# set -e
+# exit 1
+# fi
+# # Re-enable exit on error
+# set -e

 test-models-linux-aarch64:
 name: test-models-linux-aarch64

@@ -836,14 +836,14 @@ jobs:
 strategy:
 matrix:
 config: [
-# XNNPack.
-llama3.2-1b|xnnpack|--quantize,
-qwen3-0.6b|xnnpack|--quantize,
-qwen3-1.7b|xnnpack|--quantize,
-gemma3-1b|xnnpack|--quantize,
-phi4-mini|xnnpack|--quantize,
-smollm2-135m|xnnpack|--quantize,
-smollm3-3b|xnnpack|--quantize,
+# # XNNPack. (Skipping for now due to intermittent segmentation faults, see https://github.com/huggingface/optimum-executorch/issues/122.)
+# llama3.2-1b|xnnpack|--quantize,
+# qwen3-0.6b|xnnpack|--quantize,
+# qwen3-1.7b|xnnpack|--quantize,
+# gemma3-1b|xnnpack|--quantize,
+# phi4-mini|xnnpack|--quantize,
+# smollm2-135m|xnnpack|--quantize,
+# smollm3-3b|xnnpack|--quantize,
 # CoreML.
 llama3.2-1b|coreml_fp32_gpu|--quantize,
 qwen3-0.6b|coreml_fp32_gpu|--quantize,
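Each matrix entry packs the model name, the lowering recipe, and extra flags into a pipe-separated string (e.g. "llama3.2-1b|coreml_fp32_gpu|--quantize"). The Python sketch below only illustrates that format; the diff does not show how the workflow's scripts actually split these entries, so the parser and field names here are assumptions.

# Illustrative parser for the "model|recipe|flags" matrix entries; not code
# from the workflow or the CI scripts.
from typing import NamedTuple

class MatrixConfig(NamedTuple):
    model: str       # e.g. "llama3.2-1b"
    recipe: str      # e.g. "coreml_fp32_gpu" or "xnnpack"
    extra_args: str  # e.g. "--quantize" (may be empty)

def parse_config(entry: str) -> MatrixConfig:
    # Pad with empty strings so entries with fewer fields still unpack cleanly.
    model, recipe, extra = (entry.split("|") + ["", ""])[:3]
    return MatrixConfig(model, recipe, extra)

print(parse_config("qwen3-0.6b|coreml_fp32_gpu|--quantize"))
# MatrixConfig(model='qwen3-0.6b', recipe='coreml_fp32_gpu', extra_args='--quantize')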

CMakeLists.txt

Lines changed: 4 additions & 0 deletions

@@ -826,6 +826,10 @@ if(EXECUTORCH_BUILD_PYBIND)
 list(APPEND _dep_libs openvino_backend)
 endif()

+if(EXECUTORCH_BUILD_QNN)
+list(APPEND _dep_libs qnn_executorch_backend)
+endif()
+
 if(EXECUTORCH_BUILD_XNNPACK)
 # need to explicitly specify XNNPACK and xnnpack-microkernels-prod here
 # otherwise uses XNNPACK and microkernel-prod symbols from libtorch_cpu

backends/arm/quantizer/quantization_annotator.py

Lines changed: 4 additions & 3 deletions

@@ -339,6 +339,10 @@ def _match_pattern(
 torch.ops.aten.unflatten.int,
 torch.ops.aten.index_select.default,
 torch.ops.aten.index.Tensor,
+# Neg operator flips the range, but keps the magnitude the same.
+# That is why we force it to use the same qparams and avoid
+# dequant -> neg -> requant chain.
+torch.ops.aten.neg.default,
 ]

 _one_to_one_shared_input_or_input_act_qspec = [

@@ -540,9 +544,6 @@ def any_or_hardtanh_min_zero(n: Node):
 )
 ]
 quant_properties.quant_output = _QuantProperty(0, shared_qspec) # type: ignore[arg-type]
-elif node.target in (torch.ops.aten.neg.default,):
-quant_properties.quant_inputs = [_QuantProperty(0, input_act_qspec)]
-quant_properties.quant_output = _QuantProperty(0, input_act_qspec)
 elif node.target in _one_to_one:
 quant_properties.quant_inputs = [_QuantProperty(0, input_act_qspec)]
 quant_properties.quant_output = _QuantProperty(0, output_act_qspec)
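The new comment explains why aten.neg is moved into the shared-input-qspec group: negation only flips the sign of the range, so the output can reuse the input's quantization parameters exactly. The Python sketch below is illustrative only (the int8 affine scheme and the concrete scale/zero-point values are assumptions, not code from the repository); it shows that requantizing -x with the same qparams reduces to a pure integer rewrite, which is why no dequant -> neg -> requant chain is needed.

# Illustrative check, not part of the commit: with affine int8 quantization
# x ~= scale * (q - zero_point), negation under the *same* qparams is exact,
# since -x = scale * ((2*zero_point - q) - zero_point).
def quantize(x: float, scale: float, zero_point: int) -> int:
    q = round(x / scale) + zero_point
    return max(-128, min(127, q))  # clamp to the int8 range

def dequantize(q: int, scale: float, zero_point: int) -> float:
    return scale * (q - zero_point)

scale, zero_point = 0.05, 10  # made-up example qparams
for x in (-2.0, -0.35, 0.0, 1.25):
    q_in = quantize(x, scale, zero_point)
    # Requantizing -x with the shared qparams...
    q_neg = quantize(-dequantize(q_in, scale, zero_point), scale, zero_point)
    # ...matches the integer-only rewrite 2*zp - q (barring clamping at the
    # range edges), so sharing the input qspec loses no precision.
    assert q_neg == 2 * zero_point - q_in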
