Skip to content

Commit 60137ed

Browse files
authored
Merge branch 'main' into gh/swolchok/120/orig
2 parents 250d2fe + d92a763 commit 60137ed

File tree

151 files changed

+4134
-952
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

151 files changed

+4134
-952
lines changed

.ci/docker/common/install_base.sh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,11 @@ install_ubuntu() {
2626
libssl-dev \
2727
zip
2828

29+
# These libraries are needed by TorchVision
30+
apt-get install -y --no-install-recommends \
31+
libjpeg-dev \
32+
libpng-dev
33+
2934
# Cleanup package manager
3035
apt-get autoclean && apt-get clean
3136
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

.ci/docker/common/install_conda.sh

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,16 @@ install_miniconda() {
3131

3232
install_python() {
3333
pushd /opt/conda
34-
# Install the correct Python version
34+
# Install the selected Python version for CI jobs
3535
as_ci_user conda create -n "py_${PYTHON_VERSION}" -y --file /opt/conda/conda-env-ci.txt python="${PYTHON_VERSION}"
36+
37+
# From https://github.com/pytorch/pytorch/blob/main/.ci/docker/common/install_conda.sh
38+
if [[ $(uname -m) == "aarch64" ]]; then
39+
conda_install "openblas==0.3.28=*openmp*"
40+
else
41+
conda_install mkl=2022.1.0 mkl-include=2022.1.0
42+
fi
43+
3644
popd
3745
}
3846

@@ -53,7 +61,7 @@ fix_conda_ubuntu_libstdcxx() {
5361
# PyTorch sev: https://github.com/pytorch/pytorch/issues/105248
5462
# Ref: https://github.com/pytorch/pytorch/blob/main/.ci/docker/common/install_conda.sh
5563
if grep -e "2[02].04." /etc/issue >/dev/null; then
56-
rm "/opt/conda/envs/py_${PYTHON_VERSION}/lib/libstdc++.so.6"
64+
rm /opt/conda/envs/py_${PYTHON_VERSION}/lib/libstdc++.so*
5765
fi
5866
}
5967

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
#!/bin/bash
2+
# Copyright (c) Qualcomm Innovation Center, Inc.
3+
# All rights reserved
4+
#
5+
# This source code is licensed under the BSD-style license found in the
6+
# LICENSE file in the root directory of this source tree.
7+
8+
set -exu
9+
10+
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
11+
12+
export EXECUTORCH_ROOT="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)"
13+
export QNN_SDK_ROOT=/tmp/qnn/2.28.0.241029
14+
export LD_LIBRARY_PATH="${QNN_SDK_ROOT}/lib/x86_64-linux-clang"
15+
export PYTHONPATH=".."
16+
cp schema/program.fbs exir/_serialize/program.fbs
17+
cp schema/scalar_type.fbs exir/_serialize/scalar_type.fbs
18+
cp -f build-x86/backends/qualcomm/PyQnnManagerAdaptor.cpython-310-x86_64-linux-gnu.so backends/qualcomm/python
19+
cp -f build-x86/backends/qualcomm/PyQnnWrapperAdaptor.cpython-310-x86_64-linux-gnu.so backends/qualcomm/python
20+
21+
if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
22+
PYTHON_EXECUTABLE=python3
23+
fi
24+
25+
which "${PYTHON_EXECUTABLE}"
26+
27+
# Although static llama CI does not require graphviz, it is required by test_qnn_delegate.py
28+
pip install graphviz
29+
30+
# Download stories llama110m artifacts
31+
download_stories_model_artifacts
32+
echo "Creating tokenizer.bin"
33+
$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
34+
35+
set +e
36+
# Compile only as weight sharing is not applicable on x86
37+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleScript.test_stories_single_llama --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir . --compile_only
38+
exit_code1=$?
39+
40+
# Checks accuracy with weight sharing disabled since x86 does not support weight sharing.
41+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleScript.test_stories_single_llama --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir . --enable_x86_64
42+
exit_code2=$?
43+
44+
# Check the exit codes and print messages
45+
if [ $exit_code1 -ne 0 ]; then
46+
echo "Static Llama compile only with weight sharing test failed. $exit_code1."
47+
fi
48+
49+
if [ $exit_code2 -ne 0 ]; then
50+
echo "Static Llama accuracy test failed. $exit_code2."
51+
fi
52+
53+
# Return failure if either program failed
54+
if [ $exit_code1 -ne 0 ] || [ $exit_code2 -ne 0 ]; then
55+
exit 1
56+
else
57+
exit 0
58+
fi
59+
set -e

.github/workflows/lint.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ jobs:
3131
# The generic Linux job chooses to use base env, not the one setup by the image
3232
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
3333
conda activate "${CONDA_ENV}"
34-
34+
3535
# For mypy linting, we need to first install executorch first so that
3636
# it builds the python package information.
3737
BUILD_TOOL="cmake"
@@ -74,6 +74,7 @@ jobs:
7474
docker-image: executorch-ubuntu-22.04-linter
7575
fetch-depth: 0
7676
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
77+
timeout: 90
7778
script: |
7879
FILES_NEEDS_FORMAT=$(/opt/google-java-format -n extension/android/src/main/java/org/pytorch/executorch/*.java \
7980
examples/demo-apps/android/ExecuTorchDemo/app/src/main/java/com/example/executorchdemo/*.java \

.github/workflows/pull.yml

Lines changed: 35 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@ jobs:
214214
docker-image: executorch-ubuntu-22.04-clang12
215215
submodules: 'true'
216216
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
217-
timeout: 180
217+
timeout: 90
218218
script: |
219219
# The generic Linux job chooses to use base env, not the one setup by the image
220220
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
@@ -439,6 +439,39 @@ jobs:
439439
# Test llama2
440440
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -mode "${MODE}" -dtype "${DTYPE}" -pt2e_quantize "${PT2E_QUANTIZE}"
441441
442+
test-static-llama-qnn-linux:
443+
name: test-static-llama-qnn-linux
444+
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
445+
permissions:
446+
id-token: write
447+
contents: read
448+
strategy:
449+
fail-fast: false
450+
with:
451+
runner: linux.2xlarge
452+
docker-image: executorch-ubuntu-22.04-qnn-sdk
453+
submodules: 'true'
454+
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
455+
timeout: 180
456+
script: |
457+
# The generic Linux job chooses to use base env, not the one setup by the image
458+
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
459+
conda activate "${CONDA_ENV}"
460+
461+
BUILD_TOOL="cmake"
462+
463+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
464+
PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
465+
466+
# Setup executorch
467+
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}"
468+
469+
# Setup install_requirements for llama
470+
PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
471+
472+
# Test static llama weight sharing and accuracy
473+
PYTHON_EXECUTABLE=python bash .ci/scripts/test_qnn_static_llama.sh
474+
442475
test-qnn-models-linux:
443476
name: test-qnn-models-linux
444477
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
@@ -528,7 +561,7 @@ jobs:
528561
docker-image: executorch-ubuntu-22.04-clang12
529562
submodules: 'true'
530563
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
531-
timeout: 180
564+
timeout: 90
532565
script: |
533566
# The generic Linux job chooses to use base env, not the one setup by the image
534567
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")

.gitmodules

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,3 +67,6 @@
6767
[submodule "backends/cadence/utils/FACTO"]
6868
path = backends/cadence/utils/FACTO
6969
url = https://github.com/pytorch-labs/FACTO.git
70+
[submodule "third-party/pocketfft"]
71+
path = third-party/pocketfft
72+
url = https://github.com/mreineck/pocketfft

CMakeLists.txt

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -182,6 +182,10 @@ option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "Build the Data Loader extension"
182182
OFF
183183
)
184184

185+
option(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR "Build the Flat Tensor extension"
186+
OFF
187+
)
188+
185189
option(EXECUTORCH_BUILD_EXTENSION_MODULE "Build the Module extension" OFF)
186190

187191
option(EXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL "Build the Runner Util extension"
@@ -240,6 +244,9 @@ cmake_dependent_option(
240244
"NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
241245
)
242246

247+
if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
248+
set(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER ON)
249+
endif()
243250

244251
if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
245252
set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
@@ -703,6 +710,11 @@ if(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
703710
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
704711
endif()
705712

713+
if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
714+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/flat_tensor)
715+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/flat_tensor/serialize)
716+
endif()
717+
706718
if(EXECUTORCH_BUILD_EXTENSION_MODULE)
707719
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/module)
708720
endif()

CONTRIBUTING.md

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -215,6 +215,14 @@ must work with threading**
215215

216216
## Testing
217217

218+
### Running Tests Locally
219+
220+
CI is run automatically on all pull requests. However, if you want to run tests locally, here are some example commands (not exhaustive):
221+
222+
- The `sh test/build_size_test.sh` script will compile the C++ runtime along with portable kernels.
223+
- The `test/run_oss_cpp_tests.sh` script will build and run C++ tests locally.
224+
- Running `pytest` from the root directory will run Python tests locally.
225+
218226
### Writing Tests
219227
To help keep code quality high, ExecuTorch uses a combination of unit tests and
220228
end-to-end (e2e) tests. If you add a new feature or fix a bug, please add tests
@@ -229,8 +237,6 @@ If it's not clear how to add a test for your PR, take a look at the blame for
229237
the code you're modifying and find an author who has more context. Ask them
230238
for their help in the PR comments.
231239

232-
The `test/run_oss_cpp_tests.sh` script will build and run C++ tests locally.
233-
234240
### Continuous Integration
235241
See https://hud.pytorch.org/hud/pytorch/executorch/main for the current state of
236242
the CI (continuous integration) jobs. If `main` is broken, consider rebasing

README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
<div align="center">
88
<a href="https://github.com/pytorch/executorch/graphs/contributors"><img src="https://img.shields.io/github/contributors/pytorch/executorch?style=for-the-badge&color=blue" alt="Contributors"></a>
99
<a href="https://github.com/pytorch/executorch/stargazers"><img src="https://img.shields.io/github/stars/pytorch/executorch?style=for-the-badge&color=blue" alt="Stargazers"></a>
10-
<a href="https://discord.gg/MeacgB7A"><img src="https://img.shields.io/badge/Discord-Join%20Us-purple?logo=discord&logoColor=white&style=for-the-badge" alt="Join our Discord community"></a>
10+
<a href="https://discord.gg/Dh43CKSAdc"><img src="https://img.shields.io/badge/Discord-Join%20Us-purple?logo=discord&logoColor=white&style=for-the-badge" alt="Join our Discord community"></a>
1111
<a href="https://pytorch.org/executorch/stable/index.html"><img src="https://img.shields.io/badge/Documentation-000?logo=googledocs&logoColor=FFE165&style=for-the-badge" alt="Check out the documentation"></a>
1212
<hr>
1313
</div>
@@ -55,11 +55,11 @@ To get started you can:
5555
## Feedback and Engagement
5656

5757
We welcome any feedback, suggestions, and bug reports from the community to help
58-
us improve our technology. Check out the [Discussion Board](https://github.com/pytorch/executorch/discussions) or chat real time with us on [Discord](https://discord.gg/MeacgB7A)
58+
us improve our technology. Check out the [Discussion Board](https://github.com/pytorch/executorch/discussions) or chat real time with us on [Discord](https://discord.gg/Dh43CKSAdc)
5959

6060
## Contributing
6161

62-
We welcome contributions. To get started review the [guidelines](CONTRIBUTING.md) and chat with us on [Discord](https://discord.gg/MeacgB7A)
62+
We welcome contributions. To get started review the [guidelines](CONTRIBUTING.md) and chat with us on [Discord](https://discord.gg/Dh43CKSAdc)
6363

6464

6565
## Directory Structure

backends/arm/_passes/decompose_select.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,14 +37,13 @@ def call(self, graph_module: torch.fx.GraphModule):
3737
rank = len(input_node.meta["val"].size())
3838
dim = dim % rank if dim < 0 else dim
3939
index = index % rank if index < 0 else index
40-
dim_list = list(range(rank))
4140

4241
with graph_module.graph.inserting_before(node):
4342
slice_node = create_node(
4443
graph_module.graph, slice_op, (input_node, dim, index, index + 1)
4544
)
4645
squeeze_node = create_node(
47-
graph_module.graph, squeeze_op, (slice_node, dim_list)
46+
graph_module.graph, squeeze_op, (slice_node, [dim])
4847
)
4948

5049
node.replace_all_uses_with(squeeze_node)

0 commit comments

Comments
 (0)