Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 9 additions & 4 deletions .github/scripts/build-cuda.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,19 @@
declare build_arch
declare build_os
declare cuda_version
declare cuda_targets

set -xeuo pipefail

# By default, target Maxwell through Hopper.
build_capability="50;52;60;61;70;75;80;86;89;90"
if [[ -v cuda_targets ]]; then
build_capability="${cuda_targets}"
else
# By default, target Maxwell through Hopper.
build_capability="50;52;60;61;70;75;80;86;89;90"

# CUDA 12.8: Add sm100 and sm120; remove < sm75 to align with PyTorch 2.7+cu128 minimum
[[ "${cuda_version}" == 12.8.* ]] && build_capability="75;80;86;89;90;100;120"
# CUDA 12.8: Add sm100 and sm120; remove < sm75 to align with PyTorch 2.7+cu128 minimum
[[ "${cuda_version}" == 12.8.* ]] && build_capability="75;80;86;89;90;100;120"
fi

[[ "${build_os}" = windows-* ]] && python3 -m pip install ninja

Expand Down
174 changes: 174 additions & 0 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
# CI workflow: build the CPU and CUDA native libraries on Linux/Windows,
# then run the Python unit-test suite against each build artifact.
name: Unit tests

on:
  workflow_dispatch:
  schedule:
    # Every day at 02:15 AM UTC
    - cron: "15 2 * * *"
  push:
    branches: [testing-ci]

# Cancel any in-flight run for the same ref so only the newest commit is tested.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:

  # Compile the CPU-only native library for x86_64 on Linux and Windows.
  build-cpu:
    strategy:
      matrix:
        os: [ubuntu-22.04, windows-2025]
        arch: [x86_64]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4

      - name: Setup MSVC
        if: startsWith(matrix.os, 'windows')
        uses: ilammy/[email protected] # to use cl

      - name: Build C++
        run: bash .github/scripts/build-cpu.sh
        env:
          build_os: ${{ matrix.os }}
          build_arch: ${{ matrix.arch }}

      # Artifact name must match the one used by the cpu-tests download step.
      - name: Upload build artifact
        uses: actions/upload-artifact@v4
        with:
          name: lib_cpu_${{ matrix.os }}_${{ matrix.arch }}
          path: output/${{ matrix.os }}/${{ matrix.arch }}/*
          retention-days: 7

  # Compile the CUDA native library for each supported toolkit version.
  build-cuda:
    strategy:
      matrix:
        cuda_version: ["11.8.0", "12.8.1"]
        os: [ubuntu-22.04, windows-2025]
        arch: [x86_64]
    runs-on: ${{ matrix.os }}

    steps:
      - uses: actions/checkout@v4

      # Linux builds get CUDA another way (presumably via the build script's
      # container/toolchain); only Windows installs the toolkit here.
      - name: Install CUDA Toolkit
        uses: Jimver/[email protected]
        if: startsWith(matrix.os, 'windows')
        id: cuda-toolkit
        with:
          cuda: ${{ matrix.cuda_version }}
          method: "network"
          sub-packages: '["nvcc","cudart","cusparse","cublas","thrust","nvrtc_dev","cublas_dev","cusparse_dev"]'
          use-github-cache: false

      - name: Setup MSVC
        if: startsWith(matrix.os, 'windows')
        uses: ilammy/[email protected] # to use cl

      # We're running on T4 only for now, so we only target sm75.
      - name: Build C++ / CUDA
        run: bash .github/scripts/build-cuda.sh
        env:
          build_os: ${{ matrix.os }}
          build_arch: x86_64
          cuda_version: ${{ matrix.cuda_version }}
          cuda_targets: "75"

      # Artifact name must match the one used by the cuda-tests download step.
      - name: Upload build artifact
        uses: actions/upload-artifact@v4
        with:
          name: lib_cuda_${{matrix.cuda_version}}_${{ matrix.os }}_${{ matrix.arch }}
          path: output/${{ matrix.os }}/${{ matrix.arch }}/*
          retention-days: 7

  # Run the test suite on CPU against the freshly built CPU library.
  cpu-tests:
    needs: build-cpu
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-22.04, windows-2025]
        arch: [x86_64]
        torch_version: ["2.7.0"]
    runs-on: ${{ matrix.os }}
    env:
      BNB_TEST_DEVICE: cpu
    steps:
      - uses: actions/checkout@v4

      # Drop the compiled library into the package directory so the
      # editable install below picks it up.
      - name: Download build artifact
        uses: actions/download-artifact@v4
        with:
          name: lib_cpu_${{ matrix.os }}_${{ matrix.arch }}
          path: bitsandbytes/
          merge-multiple: true

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9

      - name: Install dependencies
        run: |
          pip install torch==${{ matrix.torch_version }} --index-url https://download.pytorch.org/whl/cpu
          pip install -e ".[test]"
          pip install pytest-cov

      - name: Show installed packages
        run: pip list

      - name: Run tests
        run: pytest

  # Run the test suite on self-hosted GPU runners against each CUDA build.
  cuda-tests:
    needs: build-cuda
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-22.04, windows-2025]
        arch: [x86_64]
        cuda_version: ["11.8.0", "12.8.1"]
        # Pin the torch build and wheel index that match each CUDA version.
        include:
          - cuda_version: "11.8.0"
            torch_version: "2.4.1"
            pypi_index: "https://download.pytorch.org/whl/cu118"
          - cuda_version: "12.8.1"
            torch_version: "2.7.0"
            pypi_index: "https://download.pytorch.org/whl/cu128"
        exclude:
          # Our current T4 Windows runner has a driver too old (471.11)
          # and cannot support CUDA 12+. Skip for now.
          - os: windows-2025
            cuda_version: "12.8.1"
    # Select the self-hosted GPU runner pool by OS.
    runs-on:
      labels: ${{ contains(matrix.os, 'windows') && 'CUDA-Windows-x64' || 'CUDA-Linux-x64' }}
    env:
      BNB_TEST_DEVICE: cuda
    steps:
      # Fails fast if the runner has no working NVIDIA driver.
      - name: Show GPU Information
        run: nvidia-smi

      - uses: actions/checkout@v4

      - name: Download build artifact
        uses: actions/download-artifact@v4
        with:
          name: lib_cuda_${{ matrix.cuda_version }}_${{ matrix.os }}_${{ matrix.arch }}
          path: bitsandbytes/
          merge-multiple: true

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9

      - name: Install dependencies
        run: |
          pip install torch==${{ matrix.torch_version }} --index-url ${{ matrix.pypi_index }}
          pip install -e ".[test]"
          pip install pytest-cov

      - name: Show installed packages
        run: pip list

      - name: Run tests
        run: pytest
16 changes: 15 additions & 1 deletion tests/test_functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -728,6 +728,9 @@ def test_int8_double_quant(self, dim1, dim2):
),
)
def test_integrated_int8_linear_matmul(self, device, dim1, dim4, inner):
if device == "cpu" and inner > 2048:
pytest.skip("Slow on CPU")

for i in range(k):
A = torch.randn(dim1, inner, device=device).half()
B = torch.randn(dim4, inner, device=device).half()
Expand Down Expand Up @@ -1316,7 +1319,18 @@ def test_gemv_4bit(self, device, dim, dtype, storage_type, quant_storage, double
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008

# TODO(matthewdouglas): On T4, dim=128-fp16-fc2-fp4-DQ will have relerror ~ 0.00092727
if (
device == "cuda"
and double_quant
and storage_type == "fp4"
and kind == "fc2"
and torch.cuda.get_device_capability() == (7, 5)
):
assert relerr1 < 0.00093
else:
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
Expand Down
4 changes: 4 additions & 0 deletions tests/test_optim.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
from os.path import join
import shutil
import sys
import time
import uuid

Expand Down Expand Up @@ -168,6 +169,9 @@ def rm_path(path):
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
def test_optimizer32bit(requires_cuda, dim1, dim2, gtype, optim_name):
if optim_name.startswith("paged_") and sys.platform == "win32":
pytest.skip("Paged optimizers can have issues on Windows.")

if gtype == torch.bfloat16 and optim_name in ["momentum", "rmsprop"]:
pytest.skip()
if dim1 == 1 and dim2 == 1:
Expand Down