32 changes: 32 additions & 0 deletions .github/workflows/pull.yml
@@ -862,6 +862,38 @@ jobs:
PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh


test-vulkan-models-linux:
name: test-vulkan-models-linux
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
permissions:
id-token: write
contents: read
with:
runner: linux.2xlarge
docker-image: ci-image:executorch-ubuntu-22.04-clang12
submodules: 'recursive'
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
timeout: 90
script: |
set -eux

# The generic Linux job chooses to use the base env, not the one set up by the image
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
conda activate "${CONDA_ENV}"

# Set up SwiftShader and the Vulkan SDK, which are required to build the Vulkan delegate
source .ci/scripts/setup-vulkan-linux-deps.sh

# Set up Python
PYTHON_EXECUTABLE=python \
CMAKE_ARGS="-DEXECUTORCH_BUILD_VULKAN=ON" \
.ci/scripts/setup-linux.sh --build-tool "cmake"

PYTHON_EXECUTABLE=python bash backends/vulkan/test/scripts/test_model.sh --build

# Test models serially
PYTHON_EXECUTABLE=python bash backends/vulkan/test/scripts/test_model.sh mv2
Note: plan to add more models to this list as I verify what is currently working


nxp-build-test:
name: nxp-build-test
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
181 changes: 181 additions & 0 deletions backends/vulkan/test/scripts/test_model.sh
@@ -0,0 +1,181 @@
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -exu

# Initialize variables
RUN_BUILD=false
RUN_CORRECTNESS_TEST=false
RUN_CLEAN=false
RUN_RECOMPILE=false
MODEL_NAME=""
OUTPUT_DIRECTORY="."

# Parse arguments
SKIP_NEXT=false
for i in $(seq 1 $#); do
if [[ "$SKIP_NEXT" == true ]]; then
SKIP_NEXT=false
continue
fi

arg="${!i}"
case $arg in
--build|-b)
RUN_BUILD=true
;;
--clean|-c)
RUN_CLEAN=true
;;
--recompile|-rc)
RUN_RECOMPILE=true
;;
--output_directory|-o)
next_i=$((i + 1))
if [[ $next_i -le $# ]]; then
OUTPUT_DIRECTORY="${!next_i}"
SKIP_NEXT=true
else
echo "Error: --output_directory|-o requires a value"
exit 1
fi
;;
--*|-*)
echo "Unknown argument: $arg"
exit 1
;;
*)
if [[ -z "$MODEL_NAME" ]]; then
MODEL_NAME="$arg"
else
echo "Multiple model names provided: $MODEL_NAME and $arg"
exit 1
fi
;;
esac
done

# Determine execution mode based on parsed arguments
if [[ "$RUN_BUILD" == true ]] && [[ -z "$MODEL_NAME" ]]; then
# Build-only mode
RUN_CORRECTNESS_TEST=false
elif [[ "$RUN_BUILD" == true ]] && [[ -n "$MODEL_NAME" ]]; then
# Build and test mode
RUN_CORRECTNESS_TEST=true
elif [[ "$RUN_BUILD" == false ]] && [[ -n "$MODEL_NAME" ]]; then
# Test-only mode
RUN_CORRECTNESS_TEST=true
else
echo "Invalid argument combination. Usage:"
echo " $0 --build|-b [--clean|-c] [--recompile|-rc] [-o|--output_directory DIR] # Build-only mode"
echo " $0 model_name [--build|-b] [--clean|-c] [--recompile|-rc] [-o|--output_directory DIR] # Test mode or build+test mode"
exit 1
fi

if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
PYTHON_EXECUTABLE=python3
fi
which "${PYTHON_EXECUTABLE}"

CMAKE_OUTPUT_DIR=cmake-out

# Only set EXPORTED_MODEL if running correctness test
if [[ "${RUN_CORRECTNESS_TEST}" == true ]]; then
EXPORTED_MODEL=${MODEL_NAME}_vulkan
fi


clean_build_directory() {
echo "Cleaning build directory: ${CMAKE_OUTPUT_DIR}"
rm -rf ${CMAKE_OUTPUT_DIR}
}

recompile() {
cmake --build cmake-out -j64 --target install
}

build_core_libraries_and_devtools() {
echo "Building core libraries and devtools with comprehensive Vulkan support..."

# Build core libraries with all required components
cmake . \
-DCMAKE_INSTALL_PREFIX=cmake-out \
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
-DEXECUTORCH_BUILD_KERNELS_CUSTOM_AOT=ON \
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
-DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-DEXECUTORCH_BUILD_EXECUTOR_RUNNER=ON \
-DEXECUTORCH_BUILD_DEVTOOLS=ON \
-DEXECUTORCH_BUILD_VULKAN=ON \
-DEXECUTORCH_BUILD_XNNPACK=ON \
-DEXECUTORCH_BUILD_TESTS=ON \
-Bcmake-out && \
cmake --build cmake-out -j64 --target install

# Build devtools example runner
cmake examples/devtools \
-DCMAKE_INSTALL_PREFIX=cmake-out \
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
-DEXECUTORCH_BUILD_VULKAN=ON \
-Bcmake-out/examples/devtools && \
cmake --build cmake-out/examples/devtools -j16 --config Release
}

run_example_runner() {
./${CMAKE_OUTPUT_DIR}/examples/devtools/example_runner -bundled_program_path "${OUTPUT_DIRECTORY}/${EXPORTED_MODEL}.bpte" -output_verification
}

test_bundled_model_with_vulkan() {
# Export model as bundled program with Vulkan backend
"${PYTHON_EXECUTABLE}" -m examples.vulkan.export --model_name="${MODEL_NAME}" --output_dir="${OUTPUT_DIRECTORY}" --bundled

# Update exported model name for bundled program
EXPORTED_MODEL="${MODEL_NAME}_vulkan"

# Verify the exported bundled model exists
if [[ ! -f "${OUTPUT_DIRECTORY}/${EXPORTED_MODEL}.bpte" ]]; then
echo "Error: Failed to export bundled model ${MODEL_NAME} with Vulkan backend"
exit 1
fi

# Run the bundled program through the devtools example_runner with output verification
echo "Bundled program created successfully. Running it through the devtools example_runner..."

run_example_runner
}


# Main execution
if [[ "${RUN_BUILD}" == true ]]; then
if [[ "${RUN_CLEAN}" == true ]]; then
clean_build_directory
fi
build_core_libraries_and_devtools
fi

if [[ "${RUN_RECOMPILE}" == true ]]; then
recompile
fi

if [[ "${RUN_CORRECTNESS_TEST}" == true ]]; then
echo "Testing ${MODEL_NAME} with Vulkan backend..."
# Always use bundled program testing
test_bundled_model_with_vulkan

# Check if test completed successfully
if [[ $? -eq 0 ]]; then
echo "Vulkan model test completed successfully!"
else
echo "Vulkan model test failed!"
exit 1
fi
fi
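For reference, typical local invocations of this script (a sketch based on the usage message above; `mv2` is the model name used in the CI job and the output directory is arbitrary):

```bash
# Build-only mode: build core libraries and the devtools example_runner
bash backends/vulkan/test/scripts/test_model.sh --build

# Build + test mode: rebuild, then export and run MobileNetV2 through the Vulkan delegate
bash backends/vulkan/test/scripts/test_model.sh mv2 --build

# Test-only mode: reuse an existing cmake-out and write artifacts to a chosen directory
bash backends/vulkan/test/scripts/test_model.sh mv2 -o /tmp/vulkan_models
```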
4 changes: 4 additions & 0 deletions examples/devtools/CMakeLists.txt
@@ -65,6 +65,10 @@ target_link_libraries(
portable_kernels
)

if(EXECUTORCH_BUILD_VULKAN)
target_link_libraries(example_runner vulkan_backend)
endif()

if(EXECUTORCH_BUILD_COREML)
find_library(ACCELERATE_FRAMEWORK Accelerate)
find_library(COREML_FRAMEWORK CoreML)
80 changes: 80 additions & 0 deletions examples/vulkan/README.md
@@ -0,0 +1,80 @@
# Vulkan Delegate Export Examples

This directory contains scripts for exporting models with the Vulkan delegate in ExecuTorch. Vulkan delegation allows you to run your models on devices with Vulkan-capable GPUs, potentially providing significant performance improvements over CPU execution.

## Scripts

- `export.py`: Basic export script for models to use with Vulkan delegate
- `aot_compiler.py`: Advanced export script with quantization support

## Usage

### Basic Export

```bash
python -m executorch.examples.vulkan.export -m <model_name> -o <output_dir>
```

### Export with Quantization (Experimental)

```bash
python -m executorch.examples.vulkan.aot_compiler -m <model_name> -q -o <output_dir>
```

### Dynamic Shape Support

```bash
python -m executorch.examples.vulkan.export -m <model_name> -d -o <output_dir>
```

### Additional Options

- `-s/--strict`: Export with strict mode (default: True)
- `-a/--segment_alignment`: Specify segment alignment in hex (default: 0x1000)
- `-e/--external_constants`: Save constants in external .ptd file (default: False)
- `-r/--etrecord`: Generate and save an ETRecord to the given file location

## Examples

```bash
# Export MobileNetV2 with Vulkan delegate
python -m executorch.examples.vulkan.export -m mobilenet_v2 -o ./exported_models

# Export MobileNetV3 with quantization
python -m executorch.examples.vulkan.aot_compiler -m mobilenet_v3 -q -o ./exported_models

# Export with dynamic shapes
python -m executorch.examples.vulkan.export -m mobilenet_v2 -d -o ./exported_models

# Export with ETRecord for debugging
python -m executorch.examples.vulkan.export -m mobilenet_v2 -r ./records/mobilenet_record.etrecord -o ./exported_models
```

## Supported Operations

The Vulkan delegate supports various operations including:

- Basic arithmetic (add, subtract, multiply, divide)
- Activations (ReLU, Sigmoid, Tanh, etc.)
- Convolutions (Conv1d, Conv2d, ConvTranspose2d)
- Pooling operations (MaxPool2d, AvgPool2d)
- Linear/Fully connected layers
- BatchNorm, GroupNorm
- Various tensor operations (cat, reshape, permute, etc.)

For a complete list of supported operations, refer to the Vulkan delegate implementation in the ExecuTorch codebase.
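The export scripts above operate on registered example model names. For a custom module built from the operations listed here, the same lowering can be expressed directly with the ExecuTorch export APIs. The sketch below is illustrative only: the module, tensor shapes, and output filename are placeholders, and it assumes the `VulkanPartitioner` import path used elsewhere in the ExecuTorch codebase.

```python
# Minimal sketch of the underlying lowering flow; export.py in this directory
# wraps essentially the same steps behind a model-name registry.
import torch
from executorch.backends.vulkan.partitioner.vulkan_partitioner import VulkanPartitioner
from executorch.exir import to_edge_transform_and_lower


class SmallNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.pool = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(8 * 16 * 16, 10)

    def forward(self, x):
        x = torch.relu(self.conv(x))   # convolution + activation
        x = self.pool(x)               # pooling
        x = x.reshape(x.shape[0], -1)  # tensor reshape
        return self.fc(x)              # linear layer


model = SmallNet().eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

# Export, partition Vulkan-supported ops to the delegate, and serialize.
exported = torch.export.export(model, example_inputs)
edge = to_edge_transform_and_lower(exported, partitioner=[VulkanPartitioner()])
executorch_program = edge.to_executorch()

with open("small_net_vulkan.pte", "wb") as f:
    f.write(executorch_program.buffer)
```

Operations that the partitioner does not claim stay on the portable CPU kernels, so a partially supported model still exports; it just runs fewer nodes on the GPU.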

## Debugging and Optimization

If you encounter issues with Vulkan delegation:

1. Use `-r/--etrecord` to generate an ETRecord for debugging
2. Check if your operations are supported by the Vulkan delegate
3. Ensure your Vulkan drivers are up to date
4. Try using the export script with `--strict False` if strict mode causes issues

## Requirements

- Vulkan runtime libraries (libvulkan.so.1)
- A Vulkan-capable GPU with appropriate drivers
- PyTorch with Vulkan support
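
If you are unsure whether the runtime requirements are met, a quick sanity check (assuming `ldconfig` and the `vulkaninfo` tool from vulkan-tools are available) is:

```bash
# Confirm the Vulkan loader is discoverable
ldconfig -p | grep libvulkan

# Summarize available Vulkan devices and driver versions
vulkaninfo --summary
```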
5 changes: 5 additions & 0 deletions examples/vulkan/__init__.py
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.