
Commit bfb030f

Merge pull request #832 from michaelmior/patch-1
Fix parameter name in error message
2 parents: ef4b079 + 04e2089

File tree

1 file changed: +2 -2 lines


bitsandbytes/nn/modules.py

Lines changed: 2 additions & 2 deletions
@@ -220,10 +220,10 @@ def set_compute_type(self, x):
         if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
             # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
             # warn the user about this
-            warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
+            warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
             warnings.filterwarnings('ignore', message='.*inference.')
         if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
-            warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
+            warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
             warnings.filterwarnings('ignore', message='.*inference or training')

     def _save_to_state_dict(self, destination, prefix, keep_vars):
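For context, the corrected name bnb_4bit_compute_dtype matches the option users actually set when loading a 4-bit model, so the fixed warning now points at a real knob. Below is a minimal sketch of how that option is typically set, assuming the Hugging Face transformers integration (BitsAndBytesConfig and its bnb_4bit_compute_dtype field); the model id is a placeholder, not something taken from this commit.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Setting bnb_4bit_compute_dtype to a 16-bit type keeps Linear4bit's compute
# dtype off the slow float32 default, so the warning in set_compute_type
# does not fire for float16 inputs.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # or torch.float16
)

# "some-org/some-model" is a hypothetical model id used only for illustration.
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-model",
    quantization_config=bnb_config,
)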
