Skip to content

Commit b5d6135

Browse files
Merge branch 'int8' of https://github.com/TimDettmers/bitsandbytes into int8
2 parents a72c463 + 980279f commit b5d6135

File tree

1 file changed

+9
-5
lines changed

1 file changed

+9
-5
lines changed

bitsandbytes/functional.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2223,7 +2223,7 @@ def batched_igemm(
22232223

22242224

22252225
@deprecated(
2226-
"igemmlt is deprecated and will be removed in a future release. " "Please use int8_linear_matmul instead.",
2226+
"igemmlt is deprecated and will be removed in a future release. Please use int8_linear_matmul instead.",
22272227
category=FutureWarning,
22282228
)
22292229
def igemmlt(
@@ -2365,9 +2365,9 @@ def mm_dequant(
23652365

23662366
def get_colrow_absmax(
23672367
A: torch.Tensor,
2368-
row_stats: torch.Tensor = None,
2369-
col_stats: torch.Tensor = None,
2370-
nnz_block_ptr: torch.Tensor = None,
2368+
row_stats: Optional[torch.Tensor] = None,
2369+
col_stats: Optional[torch.Tensor] = None,
2370+
nnz_block_ptr: Optional[torch.Tensor] = None,
23712371
threshold=0.0,
23722372
):
23732373
# Note: prior impl only works with fp16
@@ -2614,7 +2614,11 @@ def transform(A, to_order, from_order="row", out=None, transpose=False, state=No
26142614
return out, new_state
26152615

26162616

2617-
def spmm_coo(cooA: Union[COOSparseTensor, torch.Tensor], B: torch.Tensor, out: torch.Tensor = None):
2617+
def spmm_coo(
2618+
cooA: Union[COOSparseTensor, torch.Tensor],
2619+
B: torch.Tensor,
2620+
out: Optional[torch.Tensor] = None,
2621+
):
26182622
if not isinstance(cooA, COOSparseTensor):
26192623
assert (
26202624
cooA.is_sparse and cooA.layout == torch.sparse_coo

0 commit comments

Comments (0)