From daf50fa33d7aa226d7822fc44e5599c773fd0e60 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Fri, 17 Oct 2025 11:38:50 +0200
Subject: [PATCH] FIX Bug when dequantizing 4bit bnb weights

Fixes some failing GPU tests in CI.

A bug was introduced in #2797 where state.SCB was accessed while
dequantizing 4bit bnb weights even though state is None. This would occur,
for instance, when using DoRA, which needs to dequantize the weight.

The attribute access is now restricted to 8bit bnb weights.
---
 src/peft/utils/integrations.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/peft/utils/integrations.py b/src/peft/utils/integrations.py
index dc5ae465db..41ec60d6c3 100644
--- a/src/peft/utils/integrations.py
+++ b/src/peft/utils/integrations.py
@@ -88,9 +88,6 @@ def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None):
     """Helper function to dequantize 4bit or 8bit bnb weights."""
     import bitsandbytes as bnb
 
-    if state.SCB is None:
-        state.SCB = weight.SCB
-
     device = weight.device
 
     cls_name = weight.__class__.__name__
@@ -98,6 +95,16 @@ def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None):
         dequantized = bnb.functional.dequantize_4bit(weight.data, weight.quant_state)
         return dequantized
 
+    # 8bit case
+    if state is None:
+        raise ValueError(
+            "No `state` was passed for bnb 8bit quantized weights. Please open an issue on the PEFT repository and "
+            "report the error: https://github.com/huggingface/peft/issues"
+        )
+
+    if state.SCB is None:
+        state.SCB = weight.SCB
+
    if hasattr(bnb.functional, "int8_vectorwise_dequant"):
        # Use bitsandbytes API if available (requires v0.45.0+)
        dequantized = bnb.functional.int8_vectorwise_dequant(weight.data, state.SCB)
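
A minimal reproduction sketch of how the bug surfaced, not part of the patch itself.
It assumes a CUDA device, an installed bitsandbytes, and an illustrative 16x16 weight;
the helper dequantize_bnb_weight and its module path are taken from the patched file.

import torch
import bitsandbytes as bnb

from peft.utils.integrations import dequantize_bnb_weight

# Quantize a small weight to 4bit the way bnb's Linear4bit stores it;
# moving the Params4bit tensor onto the GPU triggers the quantization.
weight = bnb.nn.Params4bit(torch.randn(16, 16), requires_grad=False).to("cuda")

# DoRA (and similar code paths) calls this helper with state=None for 4bit weights.
# Before this patch, the unconditional state.SCB access raised
# AttributeError: 'NoneType' object has no attribute 'SCB'.
# With the patch, the SCB handling only runs on the 8bit branch.
dequantized = dequantize_bnb_weight(weight, state=None)
print(dequantized.shape)  # expected: torch.Size([16, 16])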