# Test ExecuTorch CUDA Build Compatibility
# This workflow tests whether ExecuTorch can be built successfully with CUDA support
# across different CUDA versions (12.6, 12.8, 12.9) using the command:
#   CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON" ./install_executorch.sh
#
# Note: ExecuTorch automatically detects the system CUDA version using nvcc and
# installs the matching PyTorch wheel, so no manual CUDA/PyTorch installation is needed.
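#
# For reference, nvcc-based detection typically looks something like the line below
# (illustrative sketch only, not necessarily the exact install_executorch.sh logic):
#   nvcc --version | sed -n 's/.*release \([0-9][0-9]*\.[0-9][0-9]*\).*/\1/p'   # e.g. "12.6"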

name: Test CUDA Builds

on:
  pull_request:
  push:
    branches:
      - main
      - release/*

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

jobs:
  test-cuda-builds:
    strategy:
      fail-fast: false
      matrix:
        cuda-version: ["12.6", "12.8", "12.9"]

    name: test-executorch-cuda-build-${{ matrix.cuda-version }}
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      timeout: 90
      runner: linux.g5.4xlarge.nvidia.gpu
      gpu-arch-type: cuda
      gpu-arch-version: ${{ matrix.cuda-version }}
      submodules: recursive
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      script: |
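        # Fail fast on errors (-e), treat unset variables as errors (-u), and echo commands (-x)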
        set -eux

        # The generic Linux job chooses the base conda env, not the one set up by the Docker image
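        # `conda env list --json` emits {"envs": [...]}; jq's ".[-1]" grabs the last
        # path in that list, which is expected to be the image-provided env here.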
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        if [ -n "$CONDA_ENV" ]; then
          conda activate "${CONDA_ENV}"
        fi

        # Test the ExecuTorch CUDA build - ExecuTorch detects the CUDA version automatically
        # and installs the matching PyTorch wheel when CMAKE_ARGS="-DEXECUTORCH_BUILD_CUDA=ON"
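        # PYTHON_EXECUTABLE tells the CI script which Python to use; the matrix CUDA
        # version is passed as the script's only argument.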
        PYTHON_EXECUTABLE=python bash .ci/scripts/test-cuda-build.sh "${{ matrix.cuda-version }}"

  # This job fails if any of the per-version CUDA builds above failed
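  # `if: always()` makes it run even when a matrix entry fails, so it can report
  # an aggregate pass/fail based on needs.test-cuda-builds.result.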
  check-all-cuda-builds:
    needs: test-cuda-builds
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Check if all CUDA builds succeeded
        run: |
          if [[ "${{ needs.test-cuda-builds.result }}" != "success" ]]; then
            echo "ERROR: One or more ExecuTorch CUDA builds failed!"
            echo "CUDA build results: ${{ needs.test-cuda-builds.result }}"
            exit 1
          else
            echo "SUCCESS: All ExecuTorch CUDA builds (12.6, 12.8, 12.9) completed successfully!"
          fi