Commit d3f9578

Merge branch 'main' into refactor-bitwise-logical-tests
2 parents c7e84de + eef0010 commit d3f9578

178 files changed (+4645, -132355 lines)


.ci/scripts/setup-openvino.sh

Lines changed: 28 additions & 0 deletions

@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+# shellcheck source=/dev/null
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+git clone https://github.com/openvinotoolkit/openvino.git
+cd openvino && git checkout releases/2025/1
+git submodule update --init --recursive
+sudo ./install_build_dependencies.sh
+mkdir build && cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=ON
+make -j$(nproc)
+
+cd ..
+cmake --install build --prefix dist
+
+source dist/setupvars.sh
+cd ../backends/openvino
+pip install -r requirements.txt
+cd scripts
+./openvino_build.sh --enable_python
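
The script installs its OpenVINO build into openvino/dist, which is what .ci/scripts/test_openvino.sh sources later. A minimal sketch of a local run from the repository root, mirroring how the new CI job invokes it (the working directory is the only assumption here):

PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh
ls openvino/dist/setupvars.sh   # the environment file the test script will source afterwards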

.ci/scripts/test_llama.sh

Lines changed: 1 addition & 1 deletion

@@ -269,7 +269,7 @@ $PYTHON_EXECUTABLE -m examples.models.llama.export_llama ${EXPORT_ARGS}
 
 # Create tokenizer.bin.
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 
 
 RUNTIME_ARGS="--model_path=${EXPORTED_MODEL_NAME} --tokenizer_path=tokenizer.bin --prompt=Once --temperature=0 --seq_len=10 --warmup=1"
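
This and the three sibling CI scripts below swap the in-tree extension.llm.tokenizer module for the pytorch_tokenizers package; the resulting tokenizer.bin is consumed the same way by the runner arguments above. A minimal sketch of the new invocation, assuming pytorch_tokenizers is installed in the active environment:

# Convert a SentencePiece tokenizer.model into the llama2c-style tokenizer.bin.
python -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin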

.ci/scripts/test_llama_torchao_lowbit.sh

Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ cmake --build cmake-out/examples/models/llama -j16 --config Release
 download_stories_model_artifacts
 
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 
 # Export model
 LLAMA_CHECKPOINT=stories110M.pt

.ci/scripts/test_openvino.sh

Lines changed: 16 additions & 0 deletions

@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+# shellcheck source=/dev/null
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+source openvino/dist/setupvars.sh
+cd backends/openvino/tests
+python test_runner.py --test_type ops
+python test_runner.py --test_type models
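
The test script assumes setup-openvino.sh has already produced openvino/dist relative to the repository root. A quick sanity check that the sourced environment is visible to Python (a sketch, not part of this commit; the version attribute is an assumption about the installed openvino package):

source openvino/dist/setupvars.sh
python -c "import openvino; print(openvino.__version__)"   # expected to report the releases/2025/1 build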

.ci/scripts/test_phi_3_mini.sh

Lines changed: 1 addition & 1 deletion

@@ -56,7 +56,7 @@ cmake_build_phi_3_mini() {
 prepare_tokenizer() {
   echo "Downloading and converting tokenizer.model"
   wget -O tokenizer.model "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/tokenizer.model?download=true"
-  $PYTHON_EXECUTABLE -m executorch.extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+  $PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 }
 
 # Export phi-3-mini model to pte

.ci/scripts/test_qnn_static_llama.sh

Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@ pip install graphviz
 # Download stories llama110m artifacts
 download_stories_model_artifacts
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 
 set +e
 # Compile only as weight sharing is not applicable on x86

.github/workflows/pull.yml

Lines changed: 22 additions & 0 deletions

@@ -736,3 +736,25 @@ jobs:
         conda activate "${CONDA_ENV}"
 
         # placeholder for mediatek to add more tests
+
+  test-openvino-linux:
+    name: test-openvino-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      fail-fast: false
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-gcc9
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_openvino.sh
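
The job reuses the shared linux_job_v2.yml wrapper, so everything backend-specific lives in the two scripts it calls. A minimal sketch of the same sequence outside CI (the conda environment name is an assumption; in CI the last environment baked into the image is activated):

conda activate <your-executorch-env>   # assumption: any env with ExecuTorch's dev requirements
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh
PYTHON_EXECUTABLE=python bash .ci/scripts/test_openvino.sh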

.gitmodules

Lines changed: 2 additions & 2 deletions

@@ -1,9 +1,9 @@
 [submodule "backends/arm/third-party/ethos-u-core-driver"]
 	path = backends/arm/third-party/ethos-u-core-driver
-	url = https://github.com/pytorch-labs/ethos-u-core-driver-mirror
+	url = https://git.gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-core-driver.git
 [submodule "backends/arm/third-party/serialization_lib"]
 	path = backends/arm/third-party/serialization_lib
-	url = https://github.com/pytorch-labs/tosa_serialization_lib-mirror
+	url = https://git.gitlab.arm.com/tosa/tosa-serialization.git
 [submodule "backends/vulkan/third-party/Vulkan-Headers"]
 	path = backends/vulkan/third-party/Vulkan-Headers
 	url = https://github.com/KhronosGroup/Vulkan-Headers
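
Existing checkouts keep the old pytorch-labs mirror URLs in .git/config until they are re-synced; a typical refresh after pulling this change:

git submodule sync --recursive
git submodule update --init --recursive backends/arm/third-party/ethos-u-core-driver backends/arm/third-party/serialization_lib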

.lintrunner.toml

Lines changed: 2 additions & 0 deletions

@@ -299,12 +299,14 @@ include_patterns = [
     # TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders.
     # 'backends/**/*.py',
     'backends/arm/**/*.py',
+    'backends/openvino/**/*.py',
     'build/**/*.py',
     'codegen/**/*.py',
     # 'devtools/**/*.py',
     'devtools/visualization/**/*.py',
     'docs/**/*.py',
     # 'examples/**/*.py',
+    'examples/openvino/**/*.py',
     # 'exir/**/*.py',
     # 'extension/**/*.py',
     'kernels/**/*.py',
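
With the new include patterns, the Python linters also cover the OpenVINO backend and example folders. A hedged usage sketch (lintrunner accepts explicit paths; installing it via pip and running `lintrunner init` first is assumed):

lintrunner backends/openvino/ examples/openvino/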

CMakeLists.txt

Lines changed: 13 additions & 3 deletions

@@ -1,6 +1,6 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-# Copyright 2024-2025 Arm Limited and/or its affiliates.
 # All rights reserved.
+# Copyright 2024-2025 Arm Limited and/or its affiliates.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -161,7 +161,7 @@ if(OPTIMIZE_SIZE)
   set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Os")
 else()
   # -O2: Moderate opt.
-  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2")
+  set(CMAKE_CXX_FLAGS_RELEASE "-O2 ${CMAKE_CXX_FLAGS_RELEASE}")
 endif()
 
 option(EXECUTORCH_BUILD_ANDROID_JNI "Build Android JNI" OFF)
@@ -204,6 +204,8 @@ option(EXECUTORCH_BUILD_MPS "Build the MPS backend" OFF)
 
 option(EXECUTORCH_BUILD_NEURON "Build the backends/mediatek directory" OFF)
 
+option(EXECUTORCH_BUILD_OPENVINO "Build the Openvino backend" OFF)
+
 option(EXECUTORCH_BUILD_PYBIND "Build the Python Bindings" OFF)
 
 option(EXECUTORCH_BUILD_QNN "Build the Qualcomm backend" OFF)
@@ -715,6 +717,10 @@ if(EXECUTORCH_BUILD_NEURON)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/mediatek)
 endif()
 
+if(EXECUTORCH_BUILD_OPENVINO)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/openvino)
+endif()
+
 if(EXECUTORCH_BUILD_QNN)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/qualcomm)
 endif()
@@ -751,7 +757,7 @@ if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_LLM)
-  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizer)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizers)
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_MODULE)
@@ -817,6 +823,10 @@ if(EXECUTORCH_BUILD_PYBIND)
     list(APPEND _dep_libs mpsdelegate)
   endif()
 
+  if(EXECUTORCH_BUILD_OPENVINO)
+    list(APPEND _dep_libs openvino_backend)
+  endif()
+
   if(EXECUTORCH_BUILD_XNNPACK)
     # need to explicitly specify XNNPACK and microkernels-prod
     # here otherwise uses XNNPACK and microkernel-prod symbols from libtorch_cpu
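
A hedged configure example using the new option (the build directory name and the extra PYBIND flag are illustrative choices, not mandated by this commit):

cmake -S . -B cmake-out -DCMAKE_BUILD_TYPE=Release \
      -DEXECUTORCH_BUILD_OPENVINO=ON \
      -DEXECUTORCH_BUILD_PYBIND=ON
cmake --build cmake-out -j"$(nproc)"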
