Commit 8b68170

Merge branch 'main' into change-1105490
2 parents: 8c09c9a + 73f7286

394 files changed, +23404 −5760 lines

(file name hidden in the commit view)

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-44d8d54e38c0258357d4e92e1fefe21e845947a3
+467660923a5a25e4718e1d6697b93ff1bab4e807

(file name hidden in the commit view)

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-53a2908a10f414a2f85caa06703a26a40e873869
+e6f766c7d750d40603eee3f66c5915bac606b3ea

.ci/docker/requirements-ci.txt

Lines changed: 2 additions & 2 deletions
@@ -1,12 +1,12 @@
 mpmath==1.3.0
 numpy>=2.0.0; python_version >= '3.10'
 PyYAML==6.0.1
-ruamel.yaml==0.17.32
+ruamel.yaml==0.18.15
 sympy==1.12
 timm==0.6.13
 tomli==2.0.1
 torchsr==1.0.4
-transformers==4.47.1
+transformers==4.56.1
 zstd==1.5.5.1
 pandas>=2.2.2; python_version >= '3.10'
 pytest==7.2.0

.ci/scripts/test-cuda-build.sh

Lines changed: 0 additions & 3 deletions
@@ -27,9 +27,6 @@ test_executorch_cuda_build() {
   nvcc --version || echo "nvcc not found"
   nvidia-smi || echo "nvidia-smi not found"

-  # Set CMAKE_ARGS to enable CUDA build - ExecuTorch will handle PyTorch installation automatically
-  export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
-
   echo "=== Starting ExecuTorch Installation ==="
   # Install ExecuTorch with CUDA support with timeout and error handling
   timeout 5400 ./install_executorch.sh || {
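
The deleted lines dropped the manual CMAKE_ARGS export, since install_executorch.sh now enables the CUDA build on its own. For reference, a minimal sketch of the old manual pattern, in case the flag ever needs to be forced by hand (whether the script still honors an externally set CMAKE_ARGS after this commit is an assumption, not something the diff states):

# Sketch: force the CUDA build flag manually, as the deleted hunk did.
# Assumes install_executorch.sh still reads CMAKE_ARGS from the environment.
export CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
timeout 5400 ./install_executorch.sh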

.ci/scripts/test_llama_lora.sh

Lines changed: 46 additions & 4 deletions
@@ -55,7 +55,7 @@ cmake_build_llama_runner
 # Constants.
 RUNTIME_ARGS="--tokenizer_path=${DOWNLOADED_PATH}/tokenizer.model --temperature=0 --seq_len=20 --warmup=1"
 PROMPT="What happens if you eat watermelon seeds?"
-EXPECTED_PREFIX="What happens if you eat watermelon seeds? Watermelon seeds are a good source of vitamin C,"
+EXPECTED_PREFIX="What happens if you eat watermelon seeds? Watermelon seeds are a good source of vitamin C and"

 # Export LoRA PTE file.
 MODEL_NAME="llama_3_2_1B_lora"
@@ -94,7 +94,7 @@ else
   exit 1
 fi

-# Export LoRA PTE, PTD file.
+# Export LoRA PTE, foundation PTD file.
 MODEL_SEPARATE="${MODEL_NAME}_separate"
 $PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
   base.checkpoint="${DOWNLOADED_PATH}/consolidated.00.pth" \
@@ -114,20 +114,62 @@ $PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
 NOW=$(date +"%H:%M:%S")
 echo "Starting to run llama runner at ${NOW}"
 # shellcheck source=/dev/null
-cmake-out/examples/models/llama/llama_main --model_path=${MODEL_SEPARATE}.pte --data_path=${MODEL_SEPARATE}.ptd --prompt="${PROMPT}" ${RUNTIME_ARGS} > result2.txt
+cmake-out/examples/models/llama/llama_main --model_path=${MODEL_SEPARATE}.pte --data_paths=${MODEL_SEPARATE}.ptd --prompt="${PROMPT}" ${RUNTIME_ARGS} > result2.txt
 NOW=$(date +"%H:%M:%S")
 echo "Finished at ${NOW}"

 RESULT2=$(cat result2.txt)
 if [[ "${RESULT2}" == "${EXPECTED_PREFIX}"* ]]; then
   echo "Expected result prefix: ${EXPECTED_PREFIX}"
   echo "Actual result: ${RESULT2}"
+  # Do not clean up files if test passes, as they're re-used in the next test.
   echo "Success"
-  cleanup_files
 else
   echo "Expected result prefix: ${EXPECTED_PREFIX}"
   echo "Actual result: ${RESULT2}"
   echo "Failure; results not the same"
   cleanup_files
   exit 1
 fi
+
+# Export LoRA PTE, LoRA PTD, foundation PTD file.
+MODEL_PROGRAM_ONLY="${MODEL_NAME}_program"
+MODEL_LORA_WEIGHTS="lora_weights"
+MODEL_FOUNDATION_WEIGHTS="foundation_weights"
+$PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
+  base.checkpoint="${DOWNLOADED_PATH}/consolidated.00.pth" \
+  base.params="${DOWNLOADED_PATH}/params.json" \
+  base.adapter_checkpoint="${DOWNLOADED_PATH}/adapter_model.pt" \
+  base.adapter_config="${DOWNLOADED_PATH}/adapter_config.json" \
+  base.tokenizer_path="${DOWNLOADED_PATH}/tokenizer.model" \
+  model.use_kv_cache=true \
+  model.use_sdpa_with_kv_cache=true \
+  model.dtype_override="fp32" \
+  backend.xnnpack.enabled=true \
+  backend.xnnpack.extended_ops=true \
+  export.output_name="${MODEL_PROGRAM_ONLY}.pte" \
+  export.foundation_weights_file="${MODEL_FOUNDATION_WEIGHTS}.ptd" \
+  export.lora_weights_file="${MODEL_LORA_WEIGHTS}.ptd"
+
+# Run llama runner.
+NOW=$(date +"%H:%M:%S")
+echo "Starting to run llama runner at ${NOW}"
+# shellcheck source=/dev/null
+cmake-out/examples/models/llama/llama_main --model_path=${MODEL_PROGRAM_ONLY}.pte --data_paths="${MODEL_FOUNDATION_WEIGHTS}.ptd,${MODEL_LORA_WEIGHTS}.ptd" --prompt="${PROMPT}" ${RUNTIME_ARGS} > result3.txt
+NOW=$(date +"%H:%M:%S")
+echo "Finished at ${NOW}"
+
+RESULT3=$(cat result3.txt)
+if [[ "${RESULT3}" == "${EXPECTED_PREFIX}"* ]]; then
+  echo "Expected result prefix: ${EXPECTED_PREFIX}"
+  echo "Actual result: ${RESULT3}"
+  echo "Success"
+else
+  echo "Expected result prefix: ${EXPECTED_PREFIX}"
+  echo "Actual result: ${RESULT3}"
+  echo "Failure; results not the same"
+  cleanup_files
+  exit 1
+fi
+
+cleanup_files
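
Note the script now repeats the same prefix comparison for each result file, and the flag rename from --data_path to --data_paths reflects that the runner accepts a comma-separated list of .ptd files. A hypothetical helper, not part of this commit, that would factor the repeated check into one function; EXPECTED_PREFIX, cleanup_files, and the keep-files-on-success behavior are taken from the hunks above:

# Hypothetical helper (sketch only): verify a runner output file against
# EXPECTED_PREFIX; pass keep_files=1 when artifacts are re-used by a later test.
verify_prefix() {
  local result_file="$1"
  local keep_files="${2:-0}"
  local result
  result=$(cat "${result_file}")
  echo "Expected result prefix: ${EXPECTED_PREFIX}"
  echo "Actual result: ${result}"
  if [[ "${result}" == "${EXPECTED_PREFIX}"* ]]; then
    echo "Success"
    [[ "${keep_files}" == "1" ]] || cleanup_files
  else
    echo "Failure; results not the same"
    cleanup_files
    exit 1
  fi
}
# e.g.: verify_prefix result2.txt 1   # keep artifacts for the next test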

.ci/scripts/test_phi_3_mini.sh

Lines changed: 11 additions & 12 deletions
@@ -36,34 +36,33 @@ cmake_build_phi_3_mini() {
   cmake --build ${BUILD_DIR}/${MODEL_DIR} -j${NPROC} --config ${BUILD_TYPE}
 }

-# Download and convert tokenizer.model
+# Download tokenizer.model
 prepare_tokenizer() {
-  echo "Downloading and converting tokenizer.model"
-  wget -O tokenizer.model "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/tokenizer.model?download=true"
-  $PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
+  echo "Downloading tokenizer.model"
+  wget -O tokenizer.model "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/tokenizer.model?download=true"
 }

 # Export phi-3-mini model to pte
 export_phi_3_mini () {
   echo "Exporting phi-3-mini. This will take a few minutes"
-  $PYTHON_EXECUTABLE -m executorch.examples.models.phi-3-mini.export_phi-3-mini -c "4k" -s 128 -o phi-3-mini.pte
+  optimum-cli export executorch --model microsoft/Phi-3-mini-4k-instruct --task text-generation --recipe xnnpack --output_dir ./
 }

 run_and_verify() {
   NOW=$(date +"%H:%M:%S")
   echo "Starting to run phi-3-mini runner at ${NOW}"
-  if [[ ! -f "phi-3-mini.pte" ]]; then
-    echo "Export failed. Abort"
+  if [[ ! -f "model.pte" ]]; then
+    echo "Missing model artifact. Abort"
     exit 1
   fi
-  if [[ ! -f "tokenizer.bin" ]]; then
-    echo "tokenizer.bin is missing."
+  if [[ ! -f "tokenizer.model" ]]; then
+    echo "tokenizer.model is missing."
     exit 1
   fi

   ${BUILD_DIR}/${MODEL_DIR}/phi_3_mini_runner \
-    --model_path=phi-3-mini.pte \
-    --tokenizer_path=tokenizer.bin \
+    --model_path=model.pte \
+    --tokenizer_path=tokenizer.model \
     --seq_len=60 \
     --temperature=0 \
     --prompt="<|system|>
@@ -92,7 +91,7 @@ What is the capital of France?<|end|>
 cmake_install_executorch_libraries
 cmake_build_phi_3_mini

-# Step 2. Export the tokenizer and model
+# Step 2. Export the model
 prepare_tokenizer
 export_phi_3_mini
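
With the export now handled by optimum-cli, the artifact names are dictated by the exporter rather than the script: the recipe writes model.pte into --output_dir, and the raw tokenizer.model is consumed directly, so the tokenizer.bin conversion step is gone. A standalone sketch of the same export, using only flags that appear in the hunk above:

optimum-cli export executorch \
  --model microsoft/Phi-3-mini-4k-instruct \
  --task text-generation \
  --recipe xnnpack \
  --output_dir ./
# The recipe is expected to leave model.pte in the output directory.
test -f model.pte && echo "export produced model.pte"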

.ci/scripts/utils.sh

Lines changed: 39 additions & 0 deletions
@@ -44,6 +44,44 @@ install_pip_dependencies() {
   popd || return
 }

+dedupe_macos_loader_path_rpaths() {
+  if [[ "$(uname)" != "Darwin" ]]; then
+    return
+  fi
+
+  local torch_lib_dir
+  pushd ..
+  torch_lib_dir=$(python -c "import importlib.util; print(importlib.util.find_spec('torch').submodule_search_locations[0])")/lib
+  popd
+
+  if [[ -z "${torch_lib_dir}" || ! -d "${torch_lib_dir}" ]]; then
+    return
+  fi
+
+  local torch_libs=(
+    "libtorch_cpu.dylib"
+    "libtorch.dylib"
+    "libc10.dylib"
+  )
+
+  for lib_name in "${torch_libs[@]}"; do
+    local lib_path="${torch_lib_dir}/${lib_name}"
+    if [[ ! -f "${lib_path}" ]]; then
+      continue
+    fi
+
+    local removed=0
+    # Repeatedly remove the @loader_path rpath entries until none remain.
+    while install_name_tool -delete_rpath @loader_path "${lib_path}" 2>/dev/null; do
+      removed=1
+    done
+
+    if [[ "${removed}" == "1" ]]; then
+      install_name_tool -add_rpath @loader_path "${lib_path}" || true
+    fi
+  done
+}
+
 install_domains() {
   echo "Install torchvision and torchaudio"
   pip install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${TORCHAUDIO_VERSION}"
@@ -101,6 +139,7 @@ install_pytorch_and_domains() {
     echo "Use cached wheel at ${cached_torch_wheel}"
   fi

+  dedupe_macos_loader_path_rpaths
   # Grab the pinned audio and vision commits from PyTorch
   TORCHAUDIO_VERSION=$(cat .github/ci_commit_pins/audio.txt)
   export TORCHAUDIO_VERSION
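
The new helper handles torch dylibs that ship with @loader_path listed more than once among their LC_RPATH load commands, presumably because recent dyld versions complain about duplicate rpath entries: it deletes every @loader_path entry, then re-adds exactly one. A quick way to inspect the entries before and after, as a sketch; otool ships with the Xcode command-line tools, and the path lookup mirrors the one in the function:

torch_lib_dir=$(python -c "import importlib.util; print(importlib.util.find_spec('torch').submodule_search_locations[0])")/lib
otool -l "${torch_lib_dir}/libtorch_cpu.dylib" | grep -A2 LC_RPATH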

.githooks/pre-commit

Lines changed: 10 additions & 8 deletions
@@ -1,22 +1,24 @@
 #!/usr/bin/env bash

-# Pre-commit hook to automatically update PyTorch commit pin when torch_pin.py changes
+# Pre-commit hook to automatically update PyTorch commit pin and sync c10 directories when torch_pin.py changes

 # Check if torch_pin.py is being committed
 if git diff --cached --name-only | grep -q "^torch_pin.py$"; then
   echo "🔍 Detected changes to torch_pin.py"
-  echo "📝 Updating PyTorch commit pin..."
+  echo "📝 Updating PyTorch commit pin and syncing c10 directories..."

-  # Run the update script
+  # Run the update script (which now also syncs c10 directories)
   if python .github/scripts/update_pytorch_pin.py; then
-    # Check if pytorch.txt was modified
+    # Stage any modified files (pytorch.txt and grafted c10 files)
     if ! git diff --quiet .ci/docker/ci_commit_pins/pytorch.txt; then
-      echo "✅ PyTorch commit pin updated successfully"
-      # Stage the updated file
       git add .ci/docker/ci_commit_pins/pytorch.txt
       echo "📌 Staged .ci/docker/ci_commit_pins/pytorch.txt"
-    else
-      echo "ℹ️ PyTorch commit pin unchanged"
+    fi
+
+    # Stage any grafted c10 files
+    if ! git diff --quiet runtime/core/portable_type/c10/; then
+      git add runtime/core/portable_type/c10/
+      echo "📌 Staged grafted c10 files"
     fi
   else
     echo "❌ Failed to update PyTorch commit pin"
