Skip to content

Commit e1a3133

Browse files
committed
port get-changed-files from pytorch/pytorch
[ghstack-poisoned]
2 parents aee50cc + e6b9111 commit e1a3133

File tree

85 files changed

+2121
-170
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

85 files changed

+2121
-170
lines changed
Lines changed: 201 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,201 @@
1+
#!/bin/bash
# === CI Wheel Build & Test Script ===
#
# Builds the executorch wheel, verifies its contents (no qualcomm/sdk files,
# required qualcomm/python .so files present), then installs and smoke-tests
# the wheel in both a fresh conda env and a fresh venv.
#
# Usage: test_wheel_package_qnn.sh <python-version>   (e.g. "3.11")

# Exit immediately on error, print each command, and capture all output to build.log
set -e
set -x
exec > >(tee -i build.log) 2>&1

# The python version for the throwaway conda env is the one required argument.
PYTHON_VERSION=$1
if [ -z "$PYTHON_VERSION" ]; then
    echo "ERROR: missing python version argument (e.g. 3.11)"
    exit 1
fi

# Save repo root
REPO_ROOT=$(pwd)

# ----------------------------
# Dynamically create script_qnn_wheel_test.py
# ----------------------------
cat > "/tmp/script_qnn_wheel_test.py" << 'EOF'
# pyre-ignore-all-errors
import argparse

import torch
from executorch.backends.qualcomm.quantizer.quantizer import QnnQuantizer
from executorch.backends.qualcomm.utils.utils import (
    generate_htp_compiler_spec,
    generate_qnn_executorch_compiler_spec,
    get_soc_to_chipset_map,
    to_edge_transform_and_lower_to_qnn,
)
from executorch.exir.backend.utils import format_delegated_graph
# NOTE(review): EagerModelFactory appears unused below — confirm before removing.
from executorch.examples.models.model_factory import EagerModelFactory
from executorch.exir.capture._config import ExecutorchBackendConfig
from executorch.extension.export_util.utils import save_pte_program
from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e, prepare_qat_pt2e


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--output_folder", type=str, default="", help="The folder to store the exported program")
    parser.add_argument("--soc", type=str, default="SM8650", help="Specify the SoC model.")
    parser.add_argument("-q", "--quantization", choices=["ptq", "qat"], help="Run post-training quantization.")
    args = parser.parse_args()

    class LinearModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)

        def forward(self, arg):
            return self.linear(arg)

        def get_example_inputs(self):
            return (torch.randn(3, 3),)

    model = LinearModule()
    example_inputs = model.get_example_inputs()

    if args.quantization:
        quantizer = QnnQuantizer()
        m = torch.export.export(model.eval(), example_inputs, strict=True).module()
        if args.quantization == "qat":
            m = prepare_qat_pt2e(m, quantizer)
            m(*example_inputs)
        elif args.quantization == "ptq":
            m = prepare_pt2e(m, quantizer)
            m(*example_inputs)
        m = convert_pt2e(m)
    else:
        m = model

    # fp16 is only valid for the non-quantized path.
    use_fp16 = args.quantization is None
    backend_options = generate_htp_compiler_spec(use_fp16=use_fp16)
    compile_spec = generate_qnn_executorch_compiler_spec(
        soc_model=get_soc_to_chipset_map()[args.soc],
        backend_options=backend_options,
    )
    delegated_program = to_edge_transform_and_lower_to_qnn(m, example_inputs, compile_spec)
    output_graph = format_delegated_graph(delegated_program.exported_program().graph_module)
    # Ensure QnnBackend is in the output graph
    assert "QnnBackend" in output_graph
    executorch_program = delegated_program.to_executorch(
        config=ExecutorchBackendConfig(extract_delegate_segments=False)
    )
    save_pte_program(executorch_program, "linear", args.output_folder)


if __name__ == "__main__":
    main()
EOF

# ----------------------------
# Wheel build and .so checks
# ----------------------------
echo "=== Building Wheel Package ==="
source .ci/scripts/utils.sh
install_executorch
EXECUTORCH_BUILDING_WHEEL=1 python setup.py bdist_wheel
unset EXECUTORCH_BUILDING_WHEEL

WHEEL_FILE=$(ls dist/*.whl | head -n 1)
echo "Found wheel: $WHEEL_FILE"

# ----------------------------
# Check wheel does NOT contain qualcomm/sdk
# ----------------------------
# `unzip -Z1` prints one bare filename per line, unlike `unzip -l | awk`,
# which also emits header/footer tokens and breaks on paths with spaces.
echo "Checking wheel does not contain qualcomm/sdk..."
SDK_FILES=$(unzip -Z1 "$WHEEL_FILE" | grep "executorch/backends/qualcomm/sdk" || true)
if [ -n "$SDK_FILES" ]; then
    echo "ERROR: Wheel package contains unexpected qualcomm/sdk files:"
    echo "$SDK_FILES"
    exit 1
else
    echo "OK: No qualcomm/sdk files found in wheel"
fi

# ----------------------------
# Check .so files in the wheel
# ----------------------------
echo "Checking for .so files inside the wheel..."
WHEEL_SO_FILES=$(unzip -Z1 "$WHEEL_FILE" | grep "executorch/backends/qualcomm/python" || true)
if [ -z "$WHEEL_SO_FILES" ]; then
    echo "ERROR: No .so files found in wheel under executorch/backends/qualcomm/python"
    exit 1
else
    echo "Wheel contains the following .so files:"
    echo "$WHEEL_SO_FILES"
fi

# ----------------------------
# Helpers
# ----------------------------
# Print the site-packages directory of the interpreter passed as $1.
get_site_packages_dir () {
    local PYBIN="$1"
    "$PYBIN" - <<'PY'
import sysconfig, sys
print(sysconfig.get_paths().get("purelib") or sysconfig.get_paths().get("platlib"))
PY
}

# Install the wheel plus pinned torch/torchao nightlies into the env given by
# ($1=python, $2=pip), then run import smoke tests and the export script.
# $3 is a label (conda/venv) used only for log readability.
run_core_tests () {
    local PYBIN="$1"   # path to python
    local PIPBIN="$2"  # path to pip
    local LABEL="$3"   # label to print (conda/venv)

    echo "=== [$LABEL] Installing wheel & deps ==="
    "$PIPBIN" install --upgrade pip
    "$PIPBIN" install "$WHEEL_FILE"
    "$PIPBIN" install torch=="2.9.0.dev20250906" --index-url "https://download.pytorch.org/whl/nightly/cpu"
    "$PIPBIN" install --pre torchao --index-url "https://download.pytorch.org/whl/nightly/cpu"

    echo "=== [$LABEL] Import smoke tests ==="
    "$PYBIN" -c "import executorch; print('executorch imported successfully')"
    "$PYBIN" -c "import executorch.backends.qualcomm; print('executorch.backends.qualcomm imported successfully')"

    echo "=== [$LABEL] List installed executorch/backends/qualcomm/python ==="
    local SITE_DIR
    SITE_DIR="$(get_site_packages_dir "$PYBIN")"
    local SO_DIR="$SITE_DIR/executorch/backends/qualcomm/python"
    ls -l "$SO_DIR" || echo "Folder does not exist!"

    echo "=== [$LABEL] Run export script to generate linear.pte ==="
    # Run from the repo root so the default output folder places linear.pte there.
    (cd "$REPO_ROOT" && "$PYBIN" "/tmp/script_qnn_wheel_test.py")

    if [ -f "$REPO_ROOT/linear.pte" ]; then
        echo "[$LABEL] Model file linear.pte successfully created"
    else
        echo "ERROR: [$LABEL] Model file linear.pte was not created"
        exit 1
    fi
}

# ----------------------------
# Conda environment setup & tests
# ----------------------------
echo "=== Testing in Conda env ==="
TEMP_ENV_DIR=$(mktemp -d)
echo "Using temporary directory for conda: $TEMP_ENV_DIR"
conda create -y -p "$TEMP_ENV_DIR/env" python=$PYTHON_VERSION
# derive python/pip paths inside the conda env
CONDA_PY="$TEMP_ENV_DIR/env/bin/python"
CONDA_PIP="$TEMP_ENV_DIR/env/bin/pip"
# Some images require conda run; keep pip/python direct to simplify path math
run_core_tests "$CONDA_PY" "$CONDA_PIP" "conda"

# Cleanup conda env
conda env remove -p "$TEMP_ENV_DIR/env" -y || true
rm -rf "$TEMP_ENV_DIR"

# ----------------------------
# Python venv setup & tests
# ----------------------------
echo "=== Testing in Python venv ==="
TEMP_VENV_DIR=$(mktemp -d)
echo "Using temporary directory for venv: $TEMP_VENV_DIR"
python3 -m venv "$TEMP_VENV_DIR/venv"
VENV_PY="$TEMP_VENV_DIR/venv/bin/python"
VENV_PIP="$TEMP_VENV_DIR/venv/bin/pip"

# Ensure venv has wheel/build basics if needed
"$VENV_PIP" install --upgrade pip

run_core_tests "$VENV_PY" "$VENV_PIP" "venv"

# Cleanup venv
rm -rf "$TEMP_VENV_DIR"

echo "=== All tests completed! ==="
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# Reusable workflow: computes the space-separated list of files changed in the
# triggering PR (excluding removed files), or '*' when not running in a PR
# context, and exposes it as the `changed-files` output.
name: Get Changed Files

on:
  workflow_call:
    outputs:
      changed-files:
        description: "List of changed files (space-separated) or '*' if not in a PR"
        value: ${{ jobs.get-changed-files.outputs.changed-files }}

jobs:
  get-changed-files:
    runs-on: ubuntu-latest
    outputs:
      changed-files: ${{ steps.get-files.outputs.changed-files }}

    steps:
      - name: Get changed files
        id: get-files
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          # Check if we're in a pull request context
          if [ "${{ github.event_name }}" = "pull_request" ] || [ "${{ github.event_name }}" = "pull_request_target" ]; then
            echo "Running in PR context"

            # Get the PR number from the github context
            PR_NUMBER="${{ github.event.number }}"

            # Use gh CLI to get changed files in the PR with explicit repo;
            # --paginate covers PRs with more than one page of files, and the
            # jq filter drops files whose status is "removed".
            CHANGED_FILES=$(gh api repos/${{ github.repository }}/pulls/$PR_NUMBER/files --paginate --jq '.[] | select(.status != "removed") | .filename' | tr '\n' ' ' | sed 's/ $//')

            # Fall back to '*' (meaning "everything") if the API returned nothing.
            if [ -z "$CHANGED_FILES" ]; then
              echo "No changed files found, setting to '*'"
              CHANGED_FILES="*"
            fi

            echo "Changed files: $CHANGED_FILES"
            echo "changed-files=$CHANGED_FILES" >> "$GITHUB_OUTPUT"
          else
            echo "Not in PR context, setting changed files to '*'"
            echo "changed-files=*" >> "$GITHUB_OUTPUT"
          fi

.github/workflows/lint.yml

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,27 @@ concurrency:
1515
cancel-in-progress: true
1616

1717
jobs:
18+
get-changed-files:
19+
if: github.repository_owner == 'pytorch'
20+
name: Get changed files
21+
uses: ./.github/workflows/_get-changed-files.yml
22+
1823
lintrunner-mypy:
1924
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
25+
needs: [get-changed-files]
2026
permissions:
2127
id-token: write
2228
contents: read
29+
if: |
30+
github.repository_owner == 'pytorch' && (
31+
needs.get-changed-files.outputs.changed-files == '*' ||
32+
contains(needs.get-changed-files.outputs.changed-files, '.py') ||
33+
contains(needs.get-changed-files.outputs.changed-files, '.pyi')
34+
)
2335
with:
2436
runner: linux.2xlarge
2537
docker-image: ci-image:executorch-ubuntu-22.04-linter
26-
submodules: 'recursive'
38+
submodules: true
2739
fetch-depth: 0
2840
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
2941
timeout: 90
@@ -46,7 +58,8 @@ jobs:
4658
fi
4759
4860
RC=0
49-
# Run lintrunner on all files
61+
# Run lintrunner on all files. pytorch/pytorch notes that mypy
62+
# in particular needs this.
5063
if ! lintrunner --force-color --all-files --take MYPY --tee-json=lint.json 2> /dev/null; then
5164
echo ""
5265
echo -e "\e[1m\e[36mYou can reproduce these results locally by using \`lintrunner --take MYPY\`. (If you don't get the same results, run \'lintrunner init\' to update your local linter)\e[0m"
@@ -63,13 +76,14 @@ jobs:
6376
6477
lintrunner:
6578
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
79+
needs: [get-changed-files]
6680
permissions:
6781
id-token: write
6882
contents: read
6983
with:
7084
runner: linux.2xlarge
7185
docker-image: ci-image:executorch-ubuntu-22.04-linter
72-
submodules: 'recursive'
86+
submodules: false
7387
fetch-depth: 0
7488
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
7589
timeout: 90
@@ -92,8 +106,12 @@ jobs:
92106
fi
93107
94108
RC=0
# Lint only the files changed in the PR; '*' means lint everything.
CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
if [ "$CHANGED_FILES" = '*' ]; then
  LINTRUNNER_FILES="--all-files"
else
  LINTRUNNER_FILES="${CHANGED_FILES}"
fi
# BUG FIX: the original omitted the closing `fi` above, which is a shell
# syntax error. ${LINTRUNNER_FILES} is deliberately unquoted so the
# space-separated file list word-splits into individual arguments.
if ! lintrunner --force-color ${LINTRUNNER_FILES} --skip MYPY --tee-json=lint.json 2> /dev/null; then
97115
echo ""
98116
echo -e "\e[1m\e[36mYou can reproduce these results locally by using \`lintrunner --skip MYPY\`. (If you don't get the same results, run \'lintrunner init\' to update your local linter)\e[0m"
99117
echo -e "\e[1m\e[36mSee https://github.com/pytorch/pytorch/wiki/lintrunner for setup instructions.\e[0m"

.github/workflows/pull.yml

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,33 @@ concurrency:
1313
cancel-in-progress: true
1414

1515
jobs:
16+
test-qnn-wheel-packages-linux:
17+
name: test-qnn-wheel-packages-linux
18+
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
19+
permissions:
20+
id-token: write
21+
contents: read
22+
strategy:
23+
fail-fast: false
24+
matrix:
25+
python-version: [ "3.10", "3.11", "3.12" ]
26+
with:
27+
runner: linux.2xlarge
28+
docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk
29+
submodules: 'recursive'
30+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
31+
timeout: 180
32+
script: |
33+
# The generic Linux job chooses to use base env, not the one setup by the image
34+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
35+
conda activate "${CONDA_ENV}"
36+
37+
# Create a clean env for each python version
38+
conda create -y -n test_env_${{ matrix.python-version }} python=${{ matrix.python-version }}
39+
conda activate test_env_${{ matrix.python-version }}
40+
41+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_wheel_package_qnn.sh "${{ matrix.python-version }}"
42+
1643
test-setup-linux-gcc:
1744
name: test-setup-linux-gcc
1845
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@ cmake-android-out/
1616
cmake-ios-out/
1717
cmake-out*
1818
cmake-out-android/
19+
build-android/
20+
build-x86/
1921
dist/
2022
ethos-u-scratch/
2123
executorch.egg-info

backends/arm/quantizer/arm_quantizer.py

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -105,15 +105,27 @@ def get_symmetric_quantization_config(
105105
# Determine the right observer/fake-quant constructor
106106
if is_qat:
107107
if is_per_channel:
108-
weight_observer_or_fake_quant_ctr = PerChannelMinMaxObserver
108+
weight_observer_or_fake_quant_ctr = FakeQuantize.with_args(
109+
observer=PerChannelMinMaxObserver,
110+
quant_min=weight_qmin,
111+
quant_max=weight_qmax,
112+
dtype=torch.qint8,
113+
qscheme=torch.per_channel_symmetric,
114+
reduce_range=False,
115+
ch_axis=0,
116+
**extra_args,
117+
)
109118
else:
110119
# Set plain fake-quant with true min/max
111-
weight_observer_or_fake_quant_ctr = FakeQuantize
120+
weight_observer_or_fake_quant_ctr = FakeQuantize.with_args(**extra_args)
112121
else:
113122
# PTQ: set min/max observer
114123
weight_observer_or_fake_quant_ctr = (
115124
PerChannelMinMaxObserver if is_per_channel else MinMaxObserver
116125
)
126+
weight_observer_or_fake_quant_ctr = weight_observer_or_fake_quant_ctr.with_args(
127+
**extra_args,
128+
)
117129

118130
weight_quantization_spec = QuantizationSpec(
119131
dtype=torch.int8,
@@ -122,9 +134,7 @@ def get_symmetric_quantization_config(
122134
qscheme=weight_qscheme,
123135
ch_axis=0,
124136
is_dynamic=False,
125-
observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr.with_args(
126-
**extra_args
127-
),
137+
observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr,
128138
)
129139

130140
bias_quantization_spec = None

0 commit comments

Comments
 (0)