Commit 0b07840

Simplify statements into equivalent, modern variants
via pyupgrade --py37-plus. The changes, for example, drop explicit subclassing from object, replace super(ThisClass, self) calls with the zero-argument super(), and convert old-style string formatting to f-strings.
1 parent 1eec77d commit 0b07840
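As a rough before/after sketch of the three patterns named in the commit message (illustrative only; the class below is made up and does not appear in this diff):

# Before: idioms that pyupgrade --py37-plus rewrites
class LegacyPooler(object):                   # explicit 'object' base class
    def __init__(self, size):
        super(LegacyPooler, self).__init__()  # two-argument super()
        self.msg = "size: {}".format(size)    # str.format() formatting

# After: the equivalent modern spelling
class Pooler:                                 # implicit 'object' base class
    def __init__(self, size):
        super().__init__()                    # zero-argument super()
        self.msg = f"size: {size}"            # f-string formatting

assert LegacyPooler(3).msg == Pooler(3).msg == "size: 3"

Both spellings behave identically; the tool only changes syntax, not semantics.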


17 files changed: +103 -105 lines changed


bitsandbytes/autograd/_functions.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ def prod(iterable):
     This is particularly important for small models where outlier features
     are less systematic and occur with low frequency.
 """
-class GlobalOutlierPooler(object):
+class GlobalOutlierPooler:
     _instance = None

     def __init__(self):

bitsandbytes/cextension.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 from warnings import warn


-class CUDASetup(object):
+class CUDASetup:
     _instance = None

     def __init__(self):

bitsandbytes/cuda_setup/main.py

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ def evaluate_cuda_setup():
         cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
         return binary_name

-    cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}"))
+    cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
     cuda = get_cuda_lib_handle()
     cc = get_compute_capability(cuda)
     cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")

bitsandbytes/functional.py

Lines changed: 5 additions & 5 deletions
@@ -82,7 +82,7 @@ def prod(iterable):
     )


-class CUBLAS_Context(object):
+class CUBLAS_Context:
     _instance = None

     def __init__(self):
@@ -112,7 +112,7 @@ def get_context(self, device):
         return self.context[device.index]


-class Cusparse_Context(object):
+class Cusparse_Context:
     _instance = None

     def __init__(self):
@@ -1417,7 +1417,7 @@ def get_colrow_absmax(
     return row_stats, col_stats, nnz_block_ptr


-class COOSparseTensor(object):
+class COOSparseTensor:
     def __init__(self, rows, cols, nnz, rowidx, colidx, values):
         assert rowidx.dtype == torch.int32
         assert colidx.dtype == torch.int32
@@ -1434,7 +1434,7 @@ def __init__(self, rows, cols, nnz, rowidx, colidx, values):
         self.values = values


-class CSRSparseTensor(object):
+class CSRSparseTensor:
     def __init__(self, rows, cols, nnz, rowptr, colidx, values):
         assert rowptr.dtype == torch.int32
         assert colidx.dtype == torch.int32
@@ -1451,7 +1451,7 @@ def __init__(self, rows, cols, nnz, rowptr, colidx, values):
         self.values = values


-class CSCSparseTensor(object):
+class CSCSparseTensor:
     def __init__(self, rows, cols, nnz, colptr, rowidx, values):
         assert colptr.dtype == torch.int32
         assert rowidx.dtype == torch.int32

bitsandbytes/nn/modules.py

Lines changed: 3 additions & 3 deletions
@@ -39,7 +39,7 @@ def __init__(
         sparse: bool = False,
         _weight: Optional[Tensor] = None,
     ) -> None:
-        super(StableEmbedding, self).__init__(
+        super().__init__(
             num_embeddings,
             embedding_dim,
             padding_idx,
@@ -96,7 +96,7 @@ def __init__(
         sparse: bool = False,
         _weight: Optional[Tensor] = None,
     ) -> None:
-        super(Embedding, self).__init__(
+        super().__init__(
             num_embeddings,
             embedding_dim,
             padding_idx,
@@ -225,7 +225,7 @@ def __init__(
         threshold=0.0,
         index=None,
     ):
-        super(Linear8bitLt, self).__init__(
+        super().__init__(
             input_features, output_features, bias
         )
         self.state = bnb.MatmulLtState()

bitsandbytes/optim/adagrad.py

Lines changed: 12 additions & 12 deletions
@@ -21,18 +21,18 @@ def __init__(
         block_wise=True,
     ):
         if not 0.0 <= lr:
-            raise ValueError("Invalid learning rate: {}".format(lr))
+            raise ValueError(f"Invalid learning rate: {lr}")
         if not 0.0 <= weight_decay:
             raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay)
+                f"Invalid weight_decay value: {weight_decay}"
             )
         if not 0.0 <= eps:
-            raise ValueError("Invalid epsilon value: {}".format(eps))
+            raise ValueError(f"Invalid epsilon value: {eps}")
         if initial_accumulator_value != 0.0:
             raise ValueError("Initial accumulator value != 0.0 not supported!")
         if lr_decay != 0.0:
             raise ValueError("Lr Decay != 0.0 not supported!")
-        super(Adagrad, self).__init__(
+        super().__init__(
             "adagrad",
             params,
             lr,
@@ -63,19 +63,19 @@ def __init__(
         block_wise=True,
     ):
         if not 0.0 <= lr:
-            raise ValueError("Invalid learning rate: {}".format(lr))
+            raise ValueError(f"Invalid learning rate: {lr}")
         if not 0.0 <= weight_decay:
             raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay)
+                f"Invalid weight_decay value: {weight_decay}"
             )
         if not 0.0 <= eps:
-            raise ValueError("Invalid epsilon value: {}".format(eps))
+            raise ValueError(f"Invalid epsilon value: {eps}")
         if initial_accumulator_value != 0.0:
             raise ValueError("Initial accumulator value != 0.0 not supported!")
         if lr_decay != 0.0:
             raise ValueError("Lr Decay != 0.0 not supported!")
         assert block_wise
-        super(Adagrad8bit, self).__init__(
+        super().__init__(
             "adagrad",
             params,
             lr,
@@ -106,18 +106,18 @@ def __init__(
         block_wise=True,
     ):
         if not 0.0 <= lr:
-            raise ValueError("Invalid learning rate: {}".format(lr))
+            raise ValueError(f"Invalid learning rate: {lr}")
         if not 0.0 <= weight_decay:
             raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay)
+                f"Invalid weight_decay value: {weight_decay}"
             )
         if not 0.0 <= eps:
-            raise ValueError("Invalid epsilon value: {}".format(eps))
+            raise ValueError(f"Invalid epsilon value: {eps}")
         if initial_accumulator_value != 0.0:
             raise ValueError("Initial accumulator value != 0.0 not supported!")
         if lr_decay != 0.0:
             raise ValueError("Lr Decay != 0.0 not supported!")
-        super(Adagrad32bit, self).__init__(
+        super().__init__(
             "adagrad",
             params,
             lr,
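The .format()-to-f-string rewrites in this file (and in the optimizer files below) are behaviour-preserving: both spellings render the same string. A quick sanity check, using a made-up value rather than anything from the diff:

lr = -0.01  # hypothetical out-of-range learning rate
assert "Invalid learning rate: {}".format(lr) == f"Invalid learning rate: {lr}" == "Invalid learning rate: -0.01"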

bitsandbytes/optim/adam.py

Lines changed: 4 additions & 4 deletions
@@ -28,7 +28,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(Adam, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,
@@ -57,7 +57,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(Adam8bit, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,
@@ -86,7 +86,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(Adam32bit, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,
@@ -146,7 +146,7 @@ def __init__(
             weight_decay=weight_decay,
             amsgrad=amsgrad,
         )
-        super(AnalysisAdam, self).__init__(params, defaults)
+        super().__init__(params, defaults)
         self.analysis = bnb_analysis
         self.savedir = savedir

bitsandbytes/optim/adamw.py

Lines changed: 3 additions & 3 deletions
@@ -20,7 +20,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(AdamW, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,
@@ -49,7 +49,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(AdamW8bit, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,
@@ -78,7 +78,7 @@ def __init__(
         percentile_clipping=100,
         block_wise=True,
     ):
-        super(AdamW32bit, self).__init__(
+        super().__init__(
             "adam",
             params,
             lr,

bitsandbytes/optim/lamb.py

Lines changed: 3 additions & 3 deletions
@@ -23,7 +23,7 @@ def __init__(
         block_wise=False,
         max_unorm=1.0,
     ):
-        super(LAMB, self).__init__(
+        super().__init__(
             "lamb",
             params,
             lr,
@@ -56,7 +56,7 @@ def __init__(
         block_wise=False,
         max_unorm=1.0,
     ):
-        super(LAMB8bit, self).__init__(
+        super().__init__(
             "lamb",
             params,
             lr,
@@ -89,7 +89,7 @@ def __init__(
         block_wise=False,
         max_unorm=1.0,
     ):
-        super(LAMB32bit, self).__init__(
+        super().__init__(
             "lamb",
             params,
             lr,

bitsandbytes/optim/lars.py

Lines changed: 8 additions & 8 deletions
@@ -27,7 +27,7 @@ def __init__(
             raise NotImplementedError(
                 f"LARS without momentum is not supported!"
             )
-        super(LARS, self).__init__(
+        super().__init__(
             "lars",
             params,
             lr,
@@ -61,7 +61,7 @@ def __init__(
             raise NotImplementedError(
                 f"LARS without momentum is not supported!"
             )
-        super(LARS8bit, self).__init__(
+        super().__init__(
             "lars",
             params,
             lr,
@@ -95,7 +95,7 @@ def __init__(
             raise NotImplementedError(
                 f"LARS without momentum is not supported!"
             )
-        super(LARS32bit, self).__init__(
+        super().__init__(
             "lars",
             params,
             lr,
@@ -123,12 +123,12 @@ def __init__(
         max_unorm=0.02,
     ):
         if lr < 0.0:
-            raise ValueError("Invalid learning rate: {}".format(lr))
+            raise ValueError(f"Invalid learning rate: {lr}")
         if momentum < 0.0:
-            raise ValueError("Invalid momentum value: {}".format(momentum))
+            raise ValueError(f"Invalid momentum value: {momentum}")
         if weight_decay < 0.0:
             raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay)
+                f"Invalid weight_decay value: {weight_decay}"
             )

         defaults = dict(
@@ -143,10 +143,10 @@ def __init__(
             raise ValueError(
                 "Nesterov momentum requires a momentum and zero dampening"
             )
-        super(PytorchLARS, self).__init__(params, defaults)
+        super().__init__(params, defaults)

     def __setstate__(self, state):
-        super(PytorchLARS, self).__setstate__(state)
+        super().__setstate__(state)
         for group in self.param_groups:
             group.setdefault("nesterov", False)
