Skip to content

Commit bb29228

Browse files
authored
Add ability to build cuda wheels (#272)
1 parent 875f8bc commit bb29228

File tree

4 files changed

+164
-6
lines changed

4 files changed

+164
-6
lines changed
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
name: Build and test Linux CUDA wheels
2+
3+
on:
4+
pull_request:
5+
push:
6+
branches:
7+
- nightly
8+
- main
9+
- release/*
10+
tags:
11+
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
12+
workflow_dispatch:
13+
14+
concurrency:
15+
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
16+
cancel-in-progress: true
17+
18+
permissions:
19+
id-token: write
20+
contents: write
21+
22+
defaults:
23+
run:
24+
shell: bash -l -eo pipefail {0}
25+
26+
jobs:
27+
generate-matrix:
28+
uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main
29+
with:
30+
package-type: wheel
31+
os: linux
32+
test-infra-repository: pytorch/test-infra
33+
test-infra-ref: main
34+
with-cpu: disable
35+
with-xpu: disable
36+
with-rocm: disable
37+
with-cuda: enable
38+
build-python-only: "disable"
39+
build:
40+
needs: generate-matrix
41+
strategy:
42+
fail-fast: false
43+
name: Build and Upload wheel
44+
uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main
45+
with:
46+
repository: pytorch/torchcodec
47+
ref: ""
48+
test-infra-repository: pytorch/test-infra
49+
test-infra-ref: main
50+
build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
51+
post-script: packaging/post_build_script.sh
52+
smoke-test-script: packaging/fake_smoke_test.py
53+
package-name: torchcodec
54+
trigger-event: ${{ github.event_name }}
55+
build-platform: "python-build-package"
56+
build-command: "BUILD_AGAINST_ALL_FFMPEG_FROM_S3=1 ENABLE_CUDA=1 python -m build --wheel -vvv --no-isolation"
57+
58+
install-and-test:
59+
runs-on: linux.4xlarge.nvidia.gpu
60+
strategy:
61+
fail-fast: false
62+
matrix:
63+
# 3.9 corresponds to the minimum python version for which we build
64+
# the wheel unless the label ciflow/binaries/all is present in the
65+
# PR.
66+
# For the actual release we should add that label and change this to
67+
# include more python versions.
68+
python-version: ['3.9']
69+
cuda-version: ['11.8', '12.1', '12.4']
70+
ffmpeg-version-for-tests: ['5', '6', '7']
71+
container:
72+
image: "pytorch/manylinux-builder:cuda${{ matrix.cuda-version }}"
73+
options: "--gpus all -e NVIDIA_DRIVER_CAPABILITIES=video,compute,utility"
74+
if: ${{ always() }}
75+
needs: build
76+
steps:
77+
- name: Setup env vars
78+
run: |
79+
cuda_version_without_periods=$(echo "${{ matrix.cuda-version }}" | sed 's/\.//g')
80+
echo cuda_version_without_periods=${cuda_version_without_periods} >> $GITHUB_ENV
81+
- uses: actions/download-artifact@v3
82+
with:
83+
name: pytorch_torchcodec__3.9_cu${{ env.cuda_version_without_periods }}_x86_64
84+
path: pytorch/torchcodec/dist/
85+
- name: Setup miniconda using test-infra
86+
uses: ahmadsharif1/test-infra/.github/actions/setup-miniconda@14bc3c29f88d13b0237ab4ddf00aa409e45ade40
87+
with:
88+
python-version: ${{ matrix.python-version }}
89+
default-packages: "conda-forge::ffmpeg=${{ matrix.ffmpeg-version-for-tests }}"
90+
- name: Check env
91+
run: |
92+
${CONDA_RUN} env
93+
${CONDA_RUN} conda info
94+
${CONDA_RUN} nvidia-smi
95+
- name: Update pip
96+
run: ${CONDA_RUN} python -m pip install --upgrade pip
97+
- name: Install PyTorch
98+
run: |
99+
${CONDA_RUN} python -m pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu${{ env.cuda_version_without_periods }}
100+
${CONDA_RUN} python -c 'import torch; print(f"{torch.__version__}"); print(f"{torch.__file__}"); print(f"{torch.cuda.is_available()=}")'
101+
- name: Install torchcodec from the wheel
102+
run: |
103+
wheel_path=`find pytorch/torchcodec/dist -type f -name "*.whl"`
104+
echo Installing $wheel_path
105+
${CONDA_RUN} python -m pip install $wheel_path -vvv
106+
107+
- name: Check out repo
108+
uses: actions/checkout@v3
109+
110+
- name: Install cuda runtime dependencies
111+
run: |
112+
# For some reason nvidia::libnpp=12.4 doesn't install but nvidia/label/cuda-12.4.0::libnpp does.
113+
# So we use the latter convention for libnpp.
114+
${CONDA_RUN} conda install --yes nvidia/label/cuda-${{ matrix.cuda-version }}.0::libnpp nvidia::cuda-nvrtc=${{ matrix.cuda-version }} nvidia::cuda-toolkit=${{ matrix.cuda-version }} nvidia::cuda-cudart=${{ matrix.cuda-version }} nvidia::cuda-driver-dev=${{ matrix.cuda-version }}
115+
- name: Install test dependencies
116+
run: |
117+
${CONDA_RUN} python -m pip install --pre torchvision --index-url https://download.pytorch.org/whl/nightly/cpu
118+
# Ideally we would find a way to get those dependencies from pyproject.toml
119+
${CONDA_RUN} python -m pip install numpy pytest pillow
120+
121+
- name: Delete the src/ folder just for fun
122+
run: |
123+
# The only reason we checked-out the repo is to get access to the
124+
# tests. We don't care about the rest. Out of precaution, we delete
125+
# the src/ folder to be extra sure that we're running the code from
126+
# the installed wheel rather than from the source.
127+
# This is just to be extra cautious and very overkill because a)
128+
# there's no way the `torchcodec` package from src/ can be found from
129+
# the PythonPath: the main point of `src/` is precisely to protect
130+
# against that and b) if we ever were to execute code from
131+
# `src/torchcodec`, it would fail loudly because the built .so files
132+
# aren't present there.
133+
rm -r src/
134+
ls
135+
- name: Smoke test
136+
run: |
137+
${CONDA_RUN} python test/decoders/manual_smoke_test.py
138+
- name: Run Python tests
139+
run: |
140+
# We skip test_get_ffmpeg_version because it may not have a micro version.
141+
${CONDA_RUN} FAIL_WITHOUT_CUDA=1 pytest test -k "not test_get_ffmpeg_version" -vvv
142+
- name: Run Python benchmark
143+
run: |
144+
${CONDA_RUN} time python benchmarks/decoders/gpu_benchmark.py --devices=cuda:0,cpu --resize_devices=none

packaging/post_build_script.sh

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,14 @@ wheel_path=$(pwd)/$(find dist -type f -name "*.whl")
66
echo "Wheel content:"
77
unzip -l $wheel_path
88

9-
for ffmpeg_major_version in 4 5 6 7; do
9+
ffmpeg_versions=(4 5 6 7)
10+
11+
# TODO: Make ffmpeg4 work with nvcc.
12+
if [ "${ENABLE_CUDA:-0}" -eq 1 ]; then
13+
ffmpeg_versions=(5 6 7)
14+
fi
15+
16+
for ffmpeg_major_version in "${ffmpeg_versions[@]}"; do
1017
assert_in_wheel $wheel_path torchcodec/libtorchcodec${ffmpeg_major_version}.so
1118
done
1219
assert_not_in_wheel $wheel_path libtorchcodec.so

src/torchcodec/decoders/_core/CMakeLists.txt

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ function(make_torchcodec_library library_name ffmpeg_target)
3737
set(NEEDED_LIBRARIES ${ffmpeg_target} ${TORCH_LIBRARIES}
3838
${Python3_LIBRARIES})
3939
if(ENABLE_CUDA)
40-
list(APPEND NEEDED_LIBRARIES ${CUDA_CUDA_LIBRARY}
40+
list(APPEND NEEDED_LIBRARIES
4141
${CUDA_nppi_LIBRARY} ${CUDA_nppicc_LIBRARY} )
4242
endif()
4343
target_link_libraries(
@@ -76,10 +76,15 @@ if(DEFINED ENV{BUILD_AGAINST_ALL_FFMPEG_FROM_S3})
7676
${CMAKE_CURRENT_SOURCE_DIR}/fetch_and_expose_non_gpl_ffmpeg_libs.cmake
7777
)
7878

79-
make_torchcodec_library(libtorchcodec4 ffmpeg4)
80-
make_torchcodec_library(libtorchcodec5 ffmpeg5)
81-
make_torchcodec_library(libtorchcodec6 ffmpeg6)
82-
make_torchcodec_library(libtorchcodec7 ffmpeg7)
79+
80+
if(NOT ENABLE_CUDA)
81+
# TODO: Enable more ffmpeg versions for cuda.
82+
make_torchcodec_library(libtorchcodec4 ffmpeg4)
83+
endif()
84+
make_torchcodec_library(libtorchcodec7 ffmpeg7)
85+
make_torchcodec_library(libtorchcodec6 ffmpeg6)
86+
make_torchcodec_library(libtorchcodec5 ffmpeg5)
87+
8388
else()
8489
message(
8590
STATUS

test/utils.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@
1515
# Decorator for skipping CUDA tests when CUDA isn't available
1616
def needs_cuda(test_item):
1717
if not torch.cuda.is_available():
18+
if os.environ.get("FAIL_WITHOUT_CUDA") == "1":
19+
raise RuntimeError("CUDA is required for this test")
1820
return pytest.mark.skip(reason="CUDA not available")(test_item)
1921
return test_item
2022

0 commit comments

Comments
 (0)