
Commit 67bc7c0

Apply style fixes
1 parent f950380 commit 67bc7c0

File tree

1 file changed: +3 −1 lines changed

src/diffusers/loaders/lora_pipeline.py

Lines changed: 3 additions & 1 deletion

@@ -1976,7 +1976,9 @@ def _maybe_expand_transformer_param_shape_or_error_(
         if isinstance(module, torch.nn.Linear):
             is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
             if is_bnb_4bit_quantized and not is_bitsandbytes_available():
-                raise ValueError("The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints.")
+                raise ValueError(
+                    "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints."
+                )
             elif is_bnb_4bit_quantized:
                 module_weight = dequantize_bnb_weight(module.weight, state=module.weight.quant_state).data
             else:
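For context, the code this commit reformats guards a dequantization step: a `torch.nn.Linear` layer quantized with bitsandbytes 4-bit stores its weight as a `Params4bit` parameter, which must be dequantized back to full precision before its shape can be inspected or expanded. Below is a minimal standalone sketch of the same detect-and-dequantize pattern. The helper name `materialize_linear_weight` is hypothetical, and the import paths are assumptions based on the diffusers source layout, not part of this commit.

import torch

from diffusers.utils import is_bitsandbytes_available
from diffusers.quantizers.bitsandbytes import dequantize_bnb_weight


def materialize_linear_weight(module: torch.nn.Linear) -> torch.Tensor:
    """Return the full-precision weight of `module`, dequantizing bnb 4-bit weights."""
    # bitsandbytes stores 4-bit weights as a `Params4bit` parameter subclass, so
    # checking the class name detects a quantized checkpoint without importing
    # bitsandbytes up front.
    is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
    if is_bnb_4bit_quantized and not is_bitsandbytes_available():
        raise ValueError(
            "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). "
            "Install `bitsandbytes` to load quantized checkpoints."
        )
    elif is_bnb_4bit_quantized:
        # `quant_state` carries the scales and metadata needed to reconstruct
        # the original floating-point weight from the packed 4-bit storage.
        return dequantize_bnb_weight(module.weight, state=module.weight.quant_state).data
    return module.weight.data

The checkpoint-vs-environment check comes first so that a missing optional dependency produces an actionable error message instead of an opaque attribute failure during dequantization.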
