diff --git a/requirements/fabric/strategies.txt b/requirements/fabric/strategies.txt
index 5b7f170cbd866..5be2eed05284c 100644
--- a/requirements/fabric/strategies.txt
+++ b/requirements/fabric/strategies.txt
@@ -6,4 +6,4 @@
 # note: is a bug around 0.10 with `MPS_Accelerator must implement all abstract methods`
 # shall be resolved by https://github.com/microsoft/DeepSpeed/issues/4372
 deepspeed >=0.8.2, <=0.9.3; platform_system != "Windows" and platform_system != "Darwin" # strict
-bitsandbytes >=0.45.2,<0.45.3; platform_system != "Darwin"
+bitsandbytes >=0.45.2,<0.47.0; platform_system != "Darwin"
diff --git a/requirements/pytorch/extra.txt b/requirements/pytorch/extra.txt
index f205ba1298a8a..2579b701f1faf 100644
--- a/requirements/pytorch/extra.txt
+++ b/requirements/pytorch/extra.txt
@@ -8,4 +8,4 @@
 hydra-core >=1.2.0, <1.4.0
 jsonargparse[signatures] >=4.39.0, <4.41.0
 rich >=12.3.0, <14.1.0
 tensorboardX >=2.2, <2.7.0 # min version is set by torch.onnx missing attribute
-bitsandbytes >=0.45.2,<0.45.3; platform_system != "Darwin"
+bitsandbytes >=0.45.2,<0.47.0; platform_system != "Darwin"
diff --git a/src/lightning/fabric/plugins/precision/bitsandbytes.py b/src/lightning/fabric/plugins/precision/bitsandbytes.py
index 8bda93b84e243..8a71a25bb914f 100644
--- a/src/lightning/fabric/plugins/precision/bitsandbytes.py
+++ b/src/lightning/fabric/plugins/precision/bitsandbytes.py
@@ -256,9 +256,10 @@ def quantize(
         if int8params.has_fp16_weights:
             int8params.data = B
         else:
-            CB, CBt, SCB, SCBt, _ = bnb.functional.double_quant(B)
-            del CBt
-            del SCBt
+            if hasattr(bnb.functional, "double_quant"):
+                CB, _, SCB, _, _ = bnb.functional.double_quant(B)
+            else:  # for bitsandbytes versions >=0.46, where double_quant was removed
+                CB, _, SCB, _, _ = bnb.functional.int8_double_quant(B)
             int8params.data = CB
             setattr(int8params, "CB", CB)
             setattr(int8params, "SCB", SCB)