
Commit 0abfba1
Merge branch 'main' into op-floor-div
2 parents: f482fe7 + 651b357

397 files changed: +17348 −5856 lines

.ci/docker/requirements-ci.txt

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 mpmath==1.3.0
 numpy>=2.0.0; python_version >= '3.10'
 PyYAML==6.0.1
-ruamel.yaml==0.17.32
+ruamel.yaml==0.18.15
 sympy==1.12
 timm==0.6.13
 tomli==2.0.1

.ci/scripts/test-cuda-build.sh

Lines changed: 0 additions & 3 deletions
@@ -27,9 +27,6 @@ test_executorch_cuda_build() {
   nvcc --version || echo "nvcc not found"
   nvidia-smi || echo "nvidia-smi not found"

-  # Set CMAKE_ARGS to enable CUDA build - ExecuTorch will handle PyTorch installation automatically
-  export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
-
   echo "=== Starting ExecuTorch Installation ==="
   # Install ExecuTorch with CUDA support with timeout and error handling
   timeout 5400 ./install_executorch.sh || {

.ci/scripts/test_llama_lora.sh

Lines changed: 46 additions & 4 deletions
@@ -55,7 +55,7 @@ cmake_build_llama_runner
 # Constants.
 RUNTIME_ARGS="--tokenizer_path=${DOWNLOADED_PATH}/tokenizer.model --temperature=0 --seq_len=20 --warmup=1"
 PROMPT="What happens if you eat watermelon seeds?"
-EXPECTED_PREFIX="What happens if you eat watermelon seeds? Watermelon seeds are a good source of vitamin C,"
+EXPECTED_PREFIX="What happens if you eat watermelon seeds? Watermelon seeds are a good source of vitamin C and"

 # Export LoRA PTE file.
 MODEL_NAME="llama_3_2_1B_lora"
@@ -94,7 +94,7 @@ else
   exit 1
 fi

-# Export LoRA PTE, PTD file.
+# Export LoRA PTE, foundation PTD file.
 MODEL_SEPARATE="${MODEL_NAME}_separate"
 $PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
   base.checkpoint="${DOWNLOADED_PATH}/consolidated.00.pth" \
@@ -114,20 +114,62 @@ $PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
 NOW=$(date +"%H:%M:%S")
 echo "Starting to run llama runner at ${NOW}"
 # shellcheck source=/dev/null
-cmake-out/examples/models/llama/llama_main --model_path=${MODEL_SEPARATE}.pte --data_path=${MODEL_SEPARATE}.ptd --prompt="${PROMPT}" ${RUNTIME_ARGS} > result2.txt
+cmake-out/examples/models/llama/llama_main --model_path=${MODEL_SEPARATE}.pte --data_paths=${MODEL_SEPARATE}.ptd --prompt="${PROMPT}" ${RUNTIME_ARGS} > result2.txt
 NOW=$(date +"%H:%M:%S")
 echo "Finished at ${NOW}"

 RESULT2=$(cat result2.txt)
 if [[ "${RESULT2}" == "${EXPECTED_PREFIX}"* ]]; then
   echo "Expected result prefix: ${EXPECTED_PREFIX}"
   echo "Actual result: ${RESULT2}"
+  # Do not clean up files if test passes, as they're re-used in the next test.
   echo "Success"
-  cleanup_files
 else
   echo "Expected result prefix: ${EXPECTED_PREFIX}"
   echo "Actual result: ${RESULT2}"
   echo "Failure; results not the same"
   cleanup_files
   exit 1
 fi
+
+# Export LoRA PTE, LoRA PTD, foundation PTD file.
+MODEL_PROGRAM_ONLY="${MODEL_NAME}_program"
+MODEL_LORA_WEIGHTS="lora_weights"
+MODEL_FOUNDATION_WEIGHTS="foundation_weights"
+$PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
+  base.checkpoint="${DOWNLOADED_PATH}/consolidated.00.pth" \
+  base.params="${DOWNLOADED_PATH}/params.json" \
+  base.adapter_checkpoint="${DOWNLOADED_PATH}/adapter_model.pt" \
+  base.adapter_config="${DOWNLOADED_PATH}/adapter_config.json" \
+  base.tokenizer_path="${DOWNLOADED_PATH}/tokenizer.model" \
+  model.use_kv_cache=true \
+  model.use_sdpa_with_kv_cache=true \
+  model.dtype_override="fp32" \
+  backend.xnnpack.enabled=true \
+  backend.xnnpack.extended_ops=true \
+  export.output_name="${MODEL_PROGRAM_ONLY}.pte" \
+  export.foundation_weights_file="${MODEL_FOUNDATION_WEIGHTS}.ptd" \
+  export.lora_weights_file="${MODEL_LORA_WEIGHTS}.ptd"
+
+# Run llama runner.
+NOW=$(date +"%H:%M:%S")
+echo "Starting to run llama runner at ${NOW}"
+# shellcheck source=/dev/null
+cmake-out/examples/models/llama/llama_main --model_path=${MODEL_PROGRAM_ONLY}.pte --data_paths="${MODEL_FOUNDATION_WEIGHTS}.ptd,${MODEL_LORA_WEIGHTS}.ptd" --prompt="${PROMPT}" ${RUNTIME_ARGS} > result3.txt
+NOW=$(date +"%H:%M:%S")
+echo "Finished at ${NOW}"
+
+RESULT3=$(cat result3.txt)
+if [[ "${RESULT3}" == "${EXPECTED_PREFIX}"* ]]; then
+  echo "Expected result prefix: ${EXPECTED_PREFIX}"
+  echo "Actual result: ${RESULT3}"
+  echo "Success"
+else
+  echo "Expected result prefix: ${EXPECTED_PREFIX}"
+  echo "Actual result: ${RESULT3}"
+  echo "Failure; results not the same"
+  cleanup_files
+  exit 1
+fi
+
+cleanup_files
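
For reference, the separated-weights flow added above boils down to exporting a program-only `.pte` plus two `.ptd` weight files, then passing the runner a comma-separated `--data_paths` list. A minimal sketch of that invocation, using the file names this script generates and the options from `RUNTIME_ARGS` (not a new command, just the expanded form):

```bash
# Minimal sketch of the separated-weights run; file names match those produced above.
./cmake-out/examples/models/llama/llama_main \
  --model_path=llama_3_2_1B_lora_program.pte \
  --data_paths="foundation_weights.ptd,lora_weights.ptd" \
  --tokenizer_path="${DOWNLOADED_PATH}/tokenizer.model" \
  --prompt="What happens if you eat watermelon seeds?" \
  --temperature=0 --seq_len=20 --warmup=1
```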

.ci/scripts/test_llava.sh

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@ EXECUTORCH_COMMON_CMAKE_ARGS=" \
   -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
   -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
   -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \
+  -DEXECUTORCH_BUILD_EXTENSION_NAMED_DATA_MAP=ON \
   -DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
   -DEXECUTORCH_BUILD_EXTENSION_LLM_RUNNER=ON \
   -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \

.ci/scripts/test_wheel_package_qnn.sh

Lines changed: 3 additions & 1 deletion
@@ -158,10 +158,12 @@ PY

 # Install torchao based on the pinned PyTorch version
 "$PIPBIN" install torch=="${TORCH_VERSION}.${NIGHTLY_VERSION}" --index-url "https://download.pytorch.org/whl/nightly/cpu"
+"$PIPBIN" install wheel

 # Install torchao based on the pinned commit from third-party/ao submodule
 pushd "$REPO_ROOT/third-party/ao" > /dev/null
-USE_CPP=0 "$PYBIN" setup.py develop
+export USE_CPP=0
+"$PIPBIN" install . --no-build-isolation
 popd > /dev/null

 echo "=== [$LABEL] Import smoke tests ==="

.githooks/README.md

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+# Git Hooks
+
+This directory contains Git hooks for the ExecuTorch repository.
+
+## Pre-commit Hook
+
+The pre-commit hook automatically updates the PyTorch commit pin in `.ci/docker/ci_commit_pins/pytorch.txt` whenever `torch_pin.py` is modified.
+
+### How It Works
+
+1. When you commit changes to `torch_pin.py`, the hook detects the change
+2. It parses the `NIGHTLY_VERSION` field (e.g., `dev20251004`)
+3. Converts it to a date string (e.g., `2025-10-04`)
+4. Fetches the corresponding commit hash from the PyTorch nightly branch at https://github.com/pytorch/pytorch/tree/nightly
+5. Updates `.ci/docker/ci_commit_pins/pytorch.txt` with the new commit hash
+6. Automatically stages the updated file for commit
+
+### Installation
+
+To install the Git hooks, run:
+
+```bash
+.githooks/install.sh
+```
+
+This will copy the pre-commit hook to `.git/hooks/` and make it executable.
+
+### Manual Usage
+
+You can also run the update script manually at any time:
+
+```bash
+python .github/scripts/update_pytorch_pin.py
+```
+
+### Uninstalling
+
+To remove the pre-commit hook:
+
+```bash
+rm .git/hooks/pre-commit
+```
+
+## Troubleshooting
+
+If the hook fails during a commit:
+
+1. Check that Python 3 is available in your PATH
+2. Ensure you have internet connectivity to fetch commits from GitHub
+3. Verify that the `NIGHTLY_VERSION` in `torch_pin.py` is in the correct format (`devYYYYMMDD`)
+4. Make sure the corresponding nightly release exists in the PyTorch nightly branch
+
+You can run the script manually to see detailed error messages:
+
+```bash
+python .github/scripts/update_pytorch_pin.py
+```
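
The "How It Works" steps in the new README describe parsing `NIGHTLY_VERSION`, turning it into a date, and resolving that date to a commit on the PyTorch nightly branch. The actual `.github/scripts/update_pytorch_pin.py` is not part of this section, so the sketch below is only an illustration of those steps under stated assumptions: the example `NIGHTLY_VERSION` value is hypothetical, and the GitHub commits API query is one assumed way to map a date to a nightly commit, not necessarily how the real script does it.

```bash
# Illustrative sketch only -- not the actual .github/scripts/update_pytorch_pin.py.
NIGHTLY_VERSION="dev20251004"                                                 # hypothetical example value
DATE="${NIGHTLY_VERSION:3:4}-${NIGHTLY_VERSION:7:2}-${NIGHTLY_VERSION:9:2}"  # -> 2025-10-04
# Assumed approach: ask the GitHub API for the latest commit on the nightly
# branch that is not newer than that date, then write it into the pin file.
COMMIT_SHA=$(curl -s "https://api.github.com/repos/pytorch/pytorch/commits?sha=nightly&until=${DATE}T23:59:59Z&per_page=1" \
  | python3 -c "import json,sys; print(json.load(sys.stdin)[0]['sha'])")
echo "${COMMIT_SHA}" > .ci/docker/ci_commit_pins/pytorch.txt
git add .ci/docker/ci_commit_pins/pytorch.txt
```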

.githooks/install.sh

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+# Script to install Git hooks from .githooks directory
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+GIT_DIR="$(git rev-parse --git-dir)"
+HOOKS_DIR="${GIT_DIR}/hooks"
+
+echo "Installing Git hooks..."
+
+# Install pre-commit hook
+echo "📦 Installing pre-commit hook..."
+cp "${SCRIPT_DIR}/pre-commit" "${HOOKS_DIR}/pre-commit"
+chmod +x "${HOOKS_DIR}/pre-commit"
+echo "✅ pre-commit hook installed"
+
+echo ""
+echo "🎉 Git hooks installed successfully!"
+echo ""
+echo "The pre-commit hook will automatically update .ci/docker/ci_commit_pins/pytorch.txt"
+echo "whenever you commit changes to torch_pin.py"

.githooks/pre-commit

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Pre-commit hook to automatically update PyTorch commit pin and sync c10 directories when torch_pin.py changes
+
+# Check if torch_pin.py is being committed
+if git diff --cached --name-only | grep -q "^torch_pin.py$"; then
+    echo "🔍 Detected changes to torch_pin.py"
+    echo "📝 Updating PyTorch commit pin and syncing c10 directories..."
+
+    # Run the update script (which now also syncs c10 directories)
+    if python .github/scripts/update_pytorch_pin.py; then
+        # Stage any modified files (pytorch.txt and grafted c10 files)
+        if ! git diff --quiet .ci/docker/ci_commit_pins/pytorch.txt; then
+            git add .ci/docker/ci_commit_pins/pytorch.txt
+            echo "📌 Staged .ci/docker/ci_commit_pins/pytorch.txt"
+        fi
+
+        # Stage any grafted c10 files
+        if ! git diff --quiet runtime/core/portable_type/c10/; then
+            git add runtime/core/portable_type/c10/
+            echo "📌 Staged grafted c10 files"
+        fi
+    else
+        echo "❌ Failed to update PyTorch commit pin"
+        echo "Please run: python .github/scripts/update_pytorch_pin.py"
+        exit 1
+    fi
+fi
+
+exit 0
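
Taken together with the install script and README above, the intended workflow is: install the hook once, then any commit that touches `torch_pin.py` triggers the pin update automatically. A short usage sketch (the commit message is illustrative):

```bash
# One-time setup: copy the hook into .git/hooks/ and make it executable.
.githooks/install.sh

# Later: bump the nightly version in torch_pin.py and commit it.
# The pre-commit hook runs update_pytorch_pin.py and stages the refreshed
# .ci/docker/ci_commit_pins/pytorch.txt (and any grafted c10 files) for you.
git add torch_pin.py
git commit -m "Bump PyTorch nightly pin"
```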
