Commit 8d87c0b

Fixed CUDA setup bugs, including #81.

1 parent 4844aef

4 files changed: +41 -34 lines changed

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -149,3 +149,9 @@ Bug fixes:
 
 Bug fixes:
 - Fixed a bug in the CUDA Setup which led to an incomprehensible error if no GPU was detected.
+
+### 0.35.4
+
+Bug fixes:
+- Fixed a bug where the CUDA Setup failed when the CUDA runtime was found, but not the CUDA library.
+- Fixed a bug where not finding the CUDA runtime led to an incomprehensible error.
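
A hypothetical quick check of the behaviour described by these entries (assuming bitsandbytes 0.35.4 is installed): on a machine where the CUDA runtime or library cannot be found, the setup should now log a warning and fall back to the CPU binary instead of raising an incomprehensible error.

from bitsandbytes.cuda_setup.main import evaluate_cuda_setup

# evaluate_cuda_setup() returns a 5-tuple as of this commit.
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
print(binary_name)  # e.g. "libbitsandbytes_cpu.so" when no usable CUDA setup is detected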

bitsandbytes/cextension.py

Lines changed: 3 additions & 5 deletions
@@ -116,16 +116,14 @@ def get_instance(cls):
         CUDASetup.get_instance().generate_instructions()
         CUDASetup.get_instance().print_log_stack()
         raise RuntimeError('''
-        CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment!
+        CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs above to fix your environment!
         If you cannot find any issues and suspect a bug, please open an issue with details about your environment:
         https://github.com/TimDettmers/bitsandbytes/issues''')
     lib.cadam32bit_g32
     lib.get_context.restype = ct.c_void_p
     lib.get_cusparse.restype = ct.c_void_p
     COMPILED_WITH_CUDA = True
 except AttributeError:
-    warn(
-        "The installed version of bitsandbytes was compiled without GPU support. "
-        "8-bit optimizers and GPU quantization are unavailable."
-    )
+    warn("The installed version of bitsandbytes was compiled without GPU support. "
+        "8-bit optimizers and GPU quantization are unavailable.")
     COMPILED_WITH_CUDA = False
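
The except AttributeError branch above works because ctypes raises AttributeError when a symbol such as cadam32bit_g32 is missing from the loaded binary, which is exactly what happens with a CPU-only build. A minimal standalone sketch of that probe-and-fallback pattern; the library path and symbol name below are placeholders, not bitsandbytes API:

import ctypes as ct
from warnings import warn

def probe_gpu_support(lib_path="libexample.so", gpu_symbol="gpu_only_symbol"):
    # Load the shared library and touch a symbol that only exists in the
    # GPU build; ctypes raises AttributeError if the symbol is absent.
    lib = ct.cdll.LoadLibrary(lib_path)
    try:
        getattr(lib, gpu_symbol)
        return lib, True
    except AttributeError:
        warn("Library was compiled without GPU support; GPU features are unavailable.")
        return lib, False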

bitsandbytes/cuda_setup/main.py

Lines changed: 31 additions & 28 deletions
@@ -17,6 +17,7 @@
 """
 
 import ctypes
+import torch
 
 from .paths import determine_cuda_runtime_lib_path
 from bitsandbytes.cextension import CUDASetup
@@ -29,8 +30,11 @@ def check_cuda_result(cuda, result_val):
         cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
         CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
 
+
+# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
 def get_cuda_version(cuda, cudart_path):
-    # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+    if cuda is None: return None
+
     try:
         cudart = ctypes.CDLL(cudart_path)
     except OSError:
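
For reference, get_cuda_version() now returns None early when no driver handle is available. A standalone sketch of the runtime-version query it performs through ctypes; the hard-coded libcudart.so path is a stand-in for whatever determine_cuda_runtime_lib_path() returns:

import ctypes

def cuda_runtime_version(cudart_path="libcudart.so"):
    # Ask the CUDA runtime for its version via cudaRuntimeGetVersion();
    # the value is encoded as 1000*major + 10*minor (e.g. 11070 -> 11.7).
    try:
        cudart = ctypes.CDLL(cudart_path)
    except OSError:
        return None                      # runtime library not found or unreadable
    version = ctypes.c_int()
    if cudart.cudaRuntimeGetVersion(ctypes.byref(version)) != 0:
        return None                      # non-zero cudaError_t
    major = version.value // 1000
    minor = (version.value % 1000) // 10
    return f"{major}{minor}"             # e.g. "117" for CUDA 11.7
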
@@ -72,7 +76,6 @@ def get_compute_capabilities(cuda):
     # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
     """
 
-
     nGpus = ctypes.c_int()
     cc_major = ctypes.c_int()
     cc_minor = ctypes.c_int()
@@ -99,11 +102,11 @@ def get_compute_capability(cuda):
     capabilities are downwards compatible. If no GPUs are detected, it returns
     None.
     """
+    if cuda is None: return None
+
+    # TODO: handle different compute capabilities; for now, take the max
     ccs = get_compute_capabilities(cuda)
-    if ccs:
-        # TODO: handle different compute capabilities; for now, take the max
-        return ccs[-1]
-    return None
+    if ccs: return ccs[-1]
 
 
 def evaluate_cuda_setup():
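
With this change, get_compute_capability() bails out when there is no driver handle and simply returns None when no GPUs are found. The underlying query in get_compute_capabilities() goes through the CUDA driver API; a reduced sketch of those ctypes calls, with error handling collapsed to asserts and "libcuda.so" standing in for the handle from get_cuda_lib_handle():

import ctypes

def compute_capabilities(libcuda_path="libcuda.so"):
    cuda = ctypes.CDLL(libcuda_path)
    assert cuda.cuInit(0) == 0                          # 0 == CUDA_SUCCESS
    n_gpus = ctypes.c_int()
    assert cuda.cuDeviceGetCount(ctypes.byref(n_gpus)) == 0
    ccs = []
    for i in range(n_gpus.value):
        device = ctypes.c_int()
        cc_major, cc_minor = ctypes.c_int(), ctypes.c_int()
        assert cuda.cuDeviceGet(ctypes.byref(device), i) == 0
        assert cuda.cuDeviceComputeCapability(
            ctypes.byref(cc_major), ctypes.byref(cc_minor), device) == 0
        ccs.append(f"{cc_major.value}.{cc_minor.value}")
    return sorted(ccs)   # get_compute_capability() takes the last (highest) entry
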
@@ -113,28 +116,31 @@ def evaluate_cuda_setup():
     #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
     #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
     #print('='*80)
-    #if not torch.cuda.is_available():
-        #print('No GPU detected. Loading CPU library...')
-        #return binary_name
-
-    binary_name = "libbitsandbytes_cpu.so"
+    if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
 
     cuda_setup = CUDASetup.get_instance()
     cudart_path = determine_cuda_runtime_lib_path()
-    if cudart_path is None:
-        cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
-        return binary_name
-
-    cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}"))
     cuda = get_cuda_lib_handle()
     cc = get_compute_capability(cuda)
-    cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
     cuda_version_string = get_cuda_version(cuda, cudart_path)
 
+    failure = False
+    if cudart_path is None:
+        failure = True
+        cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
+    else:
+        cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}"))
 
     if cc == '' or cc is None:
-        cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library...", is_warning=True)
-        return binary_name, cudart_path, cuda, cc, cuda_version_string
+        failure = True
+        cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
+    else:
+        cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
+
+    if cuda is None:
+        failure = True
+    else:
+        cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
     # 7.5 is the minimum CC for cublaslt
     has_cublaslt = cc in ["7.5", "8.0", "8.6"]
@@ -145,16 +151,13 @@ def evaluate_cuda_setup():
 
     # we use ls -l instead of nvcc to determine the cuda version
    # since most installations will have the libcudart.so installed, but not the compiler
-    cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
-    def get_binary_name():
+    if failure:
+        binary_name = "libbitsandbytes_cpu.so"
+    elif has_cublaslt:
+        binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
+    else:
         "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
-        bin_base_name = "libbitsandbytes_cuda"
-        if has_cublaslt:
-            return f"{bin_base_name}{cuda_version_string}.so"
-        else:
-            return f"{bin_base_name}{cuda_version_string}_nocublaslt.so"
-
-    binary_name = get_binary_name()
+        binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
 
     return binary_name, cudart_path, cuda, cc, cuda_version_string
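
Condensed, the binary selection that evaluate_cuda_setup() now performs boils down to the following; this standalone function is written purely for illustration and is not part of the commit:

def select_binary_name(failure, has_cublaslt, cuda_version_string):
    # Any failure (no libcudart.so, no GPU detected, or no driver handle) -> CPU binary.
    if failure:
        return "libbitsandbytes_cpu.so"
    # cuBLASLt requires compute capability 7.5+ ("7.5", "8.0", "8.6" above).
    if has_cublaslt:
        return f"libbitsandbytes_cuda{cuda_version_string}.so"
    return f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"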

setup.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ def read(fname):
 
 setup(
     name=f"bitsandbytes",
-    version=f"0.35.3",
+    version=f"0.35.4",
     author="Tim Dettmers",
     author_email="[email protected]",
     description="8-bit optimizers and matrix multiplication routines.",
