
Commit 8fcbe70

Update on "Serialize NamedDataStoreOutput into PTD."
Update PTD serialization to account for blobs from the NamedDataStoreOutput. Something we can do in the future is to consolidate tensors (that go through the emitter) and blobs (that come from the NamedDataStore).

Differential Revision: [D70939807](https://our.internmc.facebook.com/intern/diff/D70939807/)

[ghstack-poisoned]
2 parents d8447dc + 9bcbdbd commit 8fcbe70
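For intuition, the change described in the commit message can be pictured as the .ptd serializer accepting a second source of payloads: besides the tensor buffers that flow through the emitter, it now also lays out the raw blobs carried in the NamedDataStoreOutput. The sketch below is illustrative only; it is not the ExecuTorch serialization API, and every name in it (Segment, merge_payloads, the alignment handling) is hypothetical.

```python
# Hypothetical sketch: merge emitter tensors and named-data-store blobs into
# one ordered, aligned segment list before writing a data file.
# None of these names are real ExecuTorch APIs.
from dataclasses import dataclass
from typing import Dict, List, Tuple


@dataclass
class Segment:
    name: str
    data: bytes
    offset: int  # aligned offset within the data section


def merge_payloads(
    emitter_tensors: List[Tuple[str, bytes]],
    named_blobs: Dict[str, bytes],
    alignment: int = 16,
) -> List[Segment]:
    """Lay out emitter tensors first, then named blobs, at aligned offsets."""
    segments: List[Segment] = []
    offset = 0
    for name, data in list(emitter_tensors) + sorted(named_blobs.items()):
        # Round the running offset up to the requested alignment.
        offset = (offset + alignment - 1) // alignment * alignment
        segments.append(Segment(name=name, data=data, offset=offset))
        offset += len(data)
    return segments


if __name__ == "__main__":
    for seg in merge_payloads([("weight", b"\x00" * 10)], {"bias": b"\x01" * 4}):
        print(seg.name, seg.offset, len(seg.data))
```

A future consolidation, as the message notes, would route both kinds of payloads through a single path instead of merging them at serialization time.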


327 files changed: +7208, -2832 lines changed

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-295f2ed4d103017f7e19a7b8263ece606cd629db
+7ae0ce6360b6e4f944906502d20da24c04debee5

.ci/scripts/build_android_instrumentation.sh

Lines changed: 6 additions & 26 deletions
@@ -12,30 +12,10 @@ if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
 fi
 which "${PYTHON_EXECUTABLE}"
 
-build_android_test() {
-  mkdir -p extension/android/executorch_android/src/androidTest/resources
-  cp extension/module/test/resources/add.pte extension/android/executorch_android/src/androidTest/resources
-  pushd extension/android
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:testDebugUnitTest
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:assembleAndroidTest
-  popd
-}
+mkdir -p "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources
+cp extension/module/test/resources/add.pte "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources
 
-collect_artifacts_to_be_uploaded() {
-  ARTIFACTS_DIR_NAME="$1"
-  # Collect Java library test
-  JAVA_LIBRARY_TEST_DIR="${ARTIFACTS_DIR_NAME}/library_test_dir"
-  mkdir -p "${JAVA_LIBRARY_TEST_DIR}"
-  cp extension/android/executorch_android/build/outputs/apk/androidTest/debug/*.apk "${JAVA_LIBRARY_TEST_DIR}"
-}
-
-main() {
-  build_android_test
-  if [ -n "$ARTIFACTS_DIR_NAME" ]; then
-    collect_artifacts_to_be_uploaded ${ARTIFACTS_DIR_NAME}
-  fi
-}
-
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
-  main "$@"
-fi
+pushd "${BUILD_AAR_DIR}"
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:testDebugUnitTest
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:assembleAndroidTest
+popd

.ci/scripts/gather_test_models.py

Lines changed: 3 additions & 3 deletions
@@ -14,7 +14,7 @@
 from typing import Any
 
 from examples.models import MODEL_NAME_TO_MODEL
-from examples.xnnpack import MODEL_NAME_TO_OPTIONS
+from examples.xnnpack import MODEL_NAME_TO_OPTIONS, QuantType
 
 DEFAULT_RUNNERS = {
     "linux": "linux.2xlarge",
@@ -33,7 +33,7 @@
         "dl3": "linux.4xlarge.memory",
         "emformer_join": "linux.4xlarge.memory",
         "emformer_predict": "linux.4xlarge.memory",
-        "phi-4-mini": "linux.4xlarge.memory",
+        "phi_4_mini": "linux.4xlarge.memory",
     }
 }
 
@@ -154,7 +154,7 @@ def export_models_for_ci() -> dict[str, dict]:
         if backend == "xnnpack":
             if name not in MODEL_NAME_TO_OPTIONS:
                 continue
-            if MODEL_NAME_TO_OPTIONS[name].quantization:
+            if MODEL_NAME_TO_OPTIONS[name].quantization != QuantType.NONE:
                 backend += "-quantization"
 
             if MODEL_NAME_TO_OPTIONS[name].delegation:

.ci/scripts/test_llama_torchao_lowbit.sh

Lines changed: 0 additions & 1 deletion
@@ -78,7 +78,6 @@ ${PYTHON_EXECUTABLE} -m examples.models.llama.export_llama \
   -qmode "torchao:8da${QLINEAR_BITWIDTH}w" \
   --group_size ${QLINEAR_GROUP_SIZE} \
   -E "torchao:${QEMBEDDING_BITWIDTH},${QEMBEDDING_GROUP_SIZE}" \
-  --disable_dynamic_shape \
   -d fp32
 
 # Test run

.ci/scripts/test_model.sh

Lines changed: 8 additions & 5 deletions
@@ -100,11 +100,11 @@ test_model() {
     rm "./${MODEL_NAME}.pte"
     return # Skip running with portable executor runnner since portable doesn't support Qwen's biased linears.
   fi
-  if [[ "${MODEL_NAME}" == "phi-4-mini" ]]; then
+  if [[ "${MODEL_NAME}" == "phi_4_mini" ]]; then
     # Install requirements for export_llama
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi-4-mini/config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi_4_mini/config.json
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
     return
@@ -224,19 +224,22 @@ test_model_with_coreml() {
 
   "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
   EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
-  # TODO:
+
   if [ -n "$EXPORTED_MODEL" ]; then
     EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
     mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
     EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
-    echo "Renamed file path: $EXPORTED_MODEL"
+    echo "OK exported model: $EXPORTED_MODEL"
   else
-    echo "No .pte file found"
+    echo "[error] failed to export model: no .pte file found"
     exit 1
   fi
 
   # Run the model
   if [ "${should_test}" = true ]; then
+    echo "Installing requirements needed to build coreml_executor_runner..."
+    backends/apple/coreml/scripts/install_requirements.sh
+
     echo "Testing exported model with coreml_executor_runner..."
     local out_dir=$(mktemp -d)
     COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh

.ci/scripts/unittest-linux.sh

Lines changed: 1 addition & 2 deletions
@@ -21,8 +21,7 @@ if [[ "$BUILD_TOOL" == "cmake" ]]; then
   source .ci/scripts/setup-vulkan-linux-deps.sh
 
   PYTHON_EXECUTABLE=python \
-  EXECUTORCH_BUILD_PYBIND=ON \
-  CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+  CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
   .ci/scripts/setup-linux.sh "$@"
 
 # Install llama3_2_vision dependencies.

.ci/scripts/unittest-macos-buck2.sh

File mode changed from 100644 to 100755.

.ci/scripts/unittest-macos.sh

Lines changed: 1 addition & 2 deletions
@@ -21,8 +21,7 @@ trap 'rm -rfv ${TMP_DIR}' EXIT
 
 # Setup MacOS dependencies as there is no Docker support on MacOS atm
 PYTHON_EXECUTABLE=python \
-EXECUTORCH_BUILD_PYBIND=ON \
-CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
 ${CONDA_RUN} --no-capture-output \
   .ci/scripts/setup-macos.sh "$@"
 

.ci/scripts/utils.sh

Lines changed: 39 additions & 5 deletions
@@ -60,12 +60,46 @@ install_pytorch_and_domains() {
   # Fetch the target commit
   pushd pytorch || return
   git checkout "${TORCH_VERSION}"
-  git submodule update --init --recursive
 
-  export USE_DISTRIBUTED=1
-  # Then build and install PyTorch
-  python setup.py bdist_wheel
-  pip install "$(echo dist/*.whl)"
+  local system_name=$(uname)
+  if [[ "${system_name}" == "Darwin" ]]; then
+    local platform=$(python -c 'import sysconfig; import platform; v=platform.mac_ver()[0].split(".")[0]; platform=sysconfig.get_platform().split("-"); platform[1]=f"{v}_0"; print("_".join(platform))')
+  fi
+  local python_version=$(python -c 'import platform; v=platform.python_version_tuple(); print(f"{v[0]}{v[1]}")')
+  local torch_release=$(cat version.txt)
+  local torch_short_hash=${TORCH_VERSION:0:7}
+  local torch_wheel_path="cached_artifacts/pytorch/executorch/pytorch_wheels/${system_name}/${python_version}"
+  local torch_wheel_name="torch-${torch_release}%2Bgit${torch_short_hash}-cp${python_version}-cp${python_version}-${platform:-}.whl"
+
+  local cached_torch_wheel="https://gha-artifacts.s3.us-east-1.amazonaws.com/${torch_wheel_path}/${torch_wheel_name}"
+  # Cache PyTorch wheel is only needed on MacOS, Linux CI already has this as part
+  # of the Docker image
+  local torch_wheel_not_found=0
+  if [[ "${system_name}" == "Darwin" ]]; then
+    pip install "${cached_torch_wheel}" || torch_wheel_not_found=1
+  else
+    torch_wheel_not_found=1
+  fi
+
+  # Found no such wheel, we will build it from source then
+  if [[ "${torch_wheel_not_found}" == "1" ]]; then
+    echo "No cached wheel found, continue with building PyTorch at ${TORCH_VERSION}"
+
+    git submodule update --init --recursive
+    USE_DISTRIBUTED=1 python setup.py bdist_wheel
+    pip install "$(echo dist/*.whl)"
+
+    # Only AWS runners have access to S3
+    if command -v aws && [[ -z "${GITHUB_RUNNER:-}" ]]; then
+      for wheel_path in dist/*.whl; do
+        local wheel_name=$(basename "${wheel_path}")
+        echo "Caching ${wheel_name}"
+        aws s3 cp "${wheel_path}" "s3://gha-artifacts/${torch_wheel_path}/${wheel_name}"
+      done
+    fi
+  else
+    echo "Use cached wheel at ${cached_torch_wheel}"
+  fi
 
   # Grab the pinned audio and vision commits from PyTorch
   TORCHAUDIO_VERSION=$(cat .github/ci_commit_pins/audio.txt)

.ci/scripts/wheel/envvar_base.sh

Lines changed: 1 addition & 4 deletions
@@ -8,13 +8,10 @@
 # should typically only contain shell variable assignments. Be sure to export
 # any variables so that subprocesses will see them.
 
-# Enable pybindings so that users can execute ExecuTorch programs from python.
-export EXECUTORCH_BUILD_PYBIND=1
-
 # Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
 # if not defined.
 export CMAKE_ARGS="${CMAKE_ARGS:-}"
 
 # Link the XNNPACK backend into the pybindings runtime so that users can execute
 # ExecuTorch programs that delegate to it.
-CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"
+CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON"
