
Commit 1887f00

Merge branch 'main' into version-info

2 parents 91c7518 + 8a45bfa

19 files changed: +606 -168 lines

.gitignore

Lines changed: 1 addition & 0 deletions

@@ -133,3 +133,4 @@ dmypy.json
 
 dependencies
 cuda_build
+.vscode/*

.style.yapf

Lines changed: 13 additions & 0 deletions

@@ -0,0 +1,13 @@
+[style]
+ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = True
+ALLOW_MULTILINE_LAMBDAS = True
+BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = True
+COLUMN_LIMIT = 88
+COALESCE_BRACKETS = True
+SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
+SPACES_BEFORE_COMMENT = 2
+SPLIT_BEFORE_BITWISE_OPERATOR = True
+SPLIT_BEFORE_FIRST_ARGUMENT = True
+SPLIT_BEFORE_LOGICAL_OPERATOR = True
+SPLIT_BEFORE_NAMED_ASSIGNS = True
+SPLIT_BEFORE_COMPLEX_COMPREHENSION = True
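
As an aside, a style file like this can be exercised through yapf's Python API; a minimal sketch, assuming `yapf` is installed and the `.style.yapf` above is saved in the working directory:

```python
# Minimal sketch: format a snippet with the new style file.
# Assumes `pip install yapf` and that .style.yapf sits in the current directory.
from yapf.yapflib.yapf_api import FormatCode

messy = "def f( a,b ):\n  return {'x':a ,'y':b}\n"
formatted, changed = FormatCode(messy, style_config=".style.yapf")
print(formatted)  # reflowed to the repo's 88-column, split-heavy style
```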

CHANGELOG.md

Lines changed: 13 additions & 1 deletion

@@ -311,7 +311,19 @@ User experience:
 Performance:
 - improved 4-bit inference performance for A100 GPUs. This degraded performance for A40/RTX3090 and RTX 4090 GPUs slightly.
 
-### 0.41.0
+### 0.41.1
 
 Bug fixes:
 - Fixed bugs in dynamic exponent data type creation. Thank you @RossM, @KohakuBlueleaf, @ArrowM #659 #227 #262 #152
+
+### 0.41.2
+
+Feature:
+- 4-bit serialization now supported. This enables 4-bit load/store. Thank you @poedator #753
+
+### 0.41.3
+
+Bug fixes:
+- Fixed an issue where 4-bit serialization would fail for layers without double quantization #868. Thank you, @poedator
+- Fixed an issue where calling .to() or .cuda() on a 4-bit layer twice would result in an error #867. Thank you, @jph00
+
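To illustrate the 0.41.2 entry, a rough sketch of the 4-bit load/store round trip enabled by #753; the layer sizes and file name here are arbitrary choices, and the exact loading flow may differ across versions:

```python
# Rough sketch of 4-bit serialization (#753); assumes a CUDA device and
# bitsandbytes >= 0.41.2. Sizes and the file name are arbitrary.
import torch
import bitsandbytes as bnb

layer = bnb.nn.Linear4bit(64, 64, bias=False).cuda()  # weights quantize on the move to GPU

sd = layer.state_dict()
print([k for k in sd if k != "weight"])  # quantization-state entries ride along with the weight
torch.save(sd, "linear4bit.pt")

# Intended round trip per the changelog: restore a fresh layer from the saved state.
restored = bnb.nn.Linear4bit(64, 64, bias=False)
restored.load_state_dict(torch.load("linear4bit.pt"))
restored = restored.cuda()
```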

README.md

Lines changed: 4 additions & 4 deletions

@@ -146,13 +146,13 @@ For upcoming features and changes and full history see [Patch Notes](CHANGELOG.md)
 To compile from source, you need an installation of CUDA. If `nvcc` is not installed, you can install the CUDA Toolkit with nvcc through the following commands.
 
 ```bash
-wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh
+wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/install_cuda.sh
 # Syntax cuda_install CUDA_VERSION INSTALL_PREFIX EXPORT_TO_BASH
-# CUDA_VERSION in {110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121}
+# CUDA_VERSION in {110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121, 122}
 # EXPORT_TO_BASH in {0, 1} with 0=False and 1=True
 
-# For example, the following installs CUDA 11.8 to ~/local/cuda-11.8 and exports the path to your .bashrc
-bash cuda install 118 ~/local 1
+# For example, the following installs CUDA 11.7 to ~/local/cuda-11.7 and exports the path to your .bashrc
+bash install_cuda.sh 117 ~/local 1
 ```
 
 To use a specific CUDA version just for a single compile run, you can set the variable `CUDA_HOME`, for example the following command compiles `libbitsandbytes_cuda117.so` using compiler flags for cuda11x with the cuda version at `~/local/cuda-11.7`:

bitsandbytes/autograd/_functions.py

Lines changed: 7 additions & 9 deletions

@@ -496,15 +496,15 @@ class MatMul4Bit(torch.autograd.Function):
     # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
 
     @staticmethod
-    def forward(ctx, A, B, out=None, bias=None, state=None):
+    def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
         # default of pytorch behavior if inputs are empty
         ctx.is_empty = False
         if prod(A.shape) == 0:
             ctx.is_empty = True
             ctx.A = A
             ctx.B = B
             ctx.bias = bias
-            B_shape = state[1]
+            B_shape = quant_state.shape
             if A.shape[-1] == B_shape[0]:
                 return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
             else:
@@ -513,10 +513,10 @@ def forward(ctx, A, B, out=None, bias=None, state=None):
 
         # 1. Dequantize
        # 2. MatmulnN
-        output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
+        output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
 
         # 3. Save state
-        ctx.state = state
+        ctx.state = quant_state
         ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
 
         if any(ctx.needs_input_grad[:2]):
@@ -534,7 +534,6 @@ def backward(ctx, grad_output):
 
         req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
         A, B = ctx.tensors
-        state = ctx.state
 
         grad_A, grad_B, grad_bias = None, None, None
 
@@ -563,12 +562,11 @@ def matmul(
     return MatMul8bitLt.apply(A, B, out, bias, state)
 
 
-def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
     assert quant_state is not None
     if A.numel() == A.shape[-1] and A.requires_grad == False:
-        absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
-        if A.shape[-1] % blocksize != 0:
-            warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
+        if A.shape[-1] % quant_state.blocksize != 0:
+            warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
         return MatMul4Bit.apply(A, B, out, bias, quant_state)
     else:
         out = F.gemv_4bit(A, B.t(), out, state=quant_state)
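
The shape of this refactor, positional list to named object, can be sketched outside the library; `QuantState` below is a simplified stand-in for the real `F.QuantState` in `bitsandbytes.functional`, carrying only the fields the diff touches:

```python
# Simplified stand-in for F.QuantState, illustrating the refactor's shape; the
# real class lives in bitsandbytes.functional and carries more fields than this.
from dataclasses import dataclass
import torch

@dataclass
class QuantState:
    absmax: torch.Tensor
    shape: torch.Size
    dtype: torch.dtype
    blocksize: int

# Before: callers unpacked a positional list and had to know the element order:
#     absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
# After: fields are read by name, which survives reordering and is self-documenting:
state = QuantState(torch.ones(4), torch.Size([64, 64]), torch.float16, 64)
assert state.shape == torch.Size([64, 64]) and state.blocksize == 64
```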

bitsandbytes/cuda_setup/env_vars.py

Lines changed: 1 addition & 0 deletions

@@ -8,6 +8,7 @@ def to_be_ignored(env_var: str, value: str) -> bool:
         "OLDPWD",
         "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
         "SSH_TTY",
+        "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
         "HOME", # Linux shell default
         "TMUX", # Terminal Multiplexer
         "XDG_DATA_DIRS", # XDG: Desktop environment stuff

bitsandbytes/cuda_setup/main.py

Lines changed: 3 additions & 3 deletions

@@ -64,9 +64,9 @@ def generate_instructions(self):
             self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
             self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
             self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
-            self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
-            self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
-            self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+            self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/install_cuda.sh')
+            self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash install_cuda.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
+            self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash install_cuda.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
             return
 
         make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
