From 4f3db9f8872bbde12c9398de5689ccecebcccce8 Mon Sep 17 00:00:00 2001
From: Ali Alshaarawy
Date: Tue, 1 Oct 2024 18:48:56 +0000
Subject: [PATCH 1/5] upgrade requirements.txt

---
 requirements/fabric/strategies.txt | 2 +-
 requirements/pytorch/extra.txt     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements/fabric/strategies.txt b/requirements/fabric/strategies.txt
index 4aee89d9f68e7..2c676efa4ed0f 100644
--- a/requirements/fabric/strategies.txt
+++ b/requirements/fabric/strategies.txt
@@ -6,4 +6,4 @@
 # note: is a bug around 0.10 with `MPS_Accelerator must implement all abstract methods`
 # shall be resolved by https://github.com/microsoft/DeepSpeed/issues/4372
 deepspeed >=0.8.2, <=0.9.3; platform_system != "Windows" and platform_system != "Darwin"  # strict
-bitsandbytes >=0.42.0,<0.43.0
+bitsandbytes >=0.44.0,<0.44.2
diff --git a/requirements/pytorch/extra.txt b/requirements/pytorch/extra.txt
index 6962da858c4ab..1b75a3ecaa1de 100644
--- a/requirements/pytorch/extra.txt
+++ b/requirements/pytorch/extra.txt
@@ -8,4 +8,4 @@ hydra-core >=1.2.0, <1.4.0
 jsonargparse[signatures] >=4.27.7, <4.28.0
 rich >=12.3.0, <13.6.0
 tensorboardX >=2.2, <2.7.0  # min version is set by torch.onnx missing attribute
-bitsandbytes >=0.42.0,<0.43.0
+bitsandbytes >=0.44.0,<0.44.2

From 7365d1ec689a5c422504902ab54413658ff23f9c Mon Sep 17 00:00:00 2001
From: Ali Alshaarawy
Date: Wed, 2 Oct 2024 01:55:32 +0000
Subject: [PATCH 2/5] update fabric bitsandbytes linear quantization for bnb 0.44.1

---
 src/lightning/fabric/plugins/precision/bitsandbytes.py  | 5 +++--
 src/lightning/pytorch/plugins/precision/bitsandbytes.py | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/lightning/fabric/plugins/precision/bitsandbytes.py b/src/lightning/fabric/plugins/precision/bitsandbytes.py
index 0f524dd67fad9..bd8ba8733c3ad 100644
--- a/src/lightning/fabric/plugins/precision/bitsandbytes.py
+++ b/src/lightning/fabric/plugins/precision/bitsandbytes.py
@@ -39,11 +39,11 @@
 log = logging.getLogger(__name__)

-_BITSANDBYTES_AVAILABLE = RequirementCache("bitsandbytes>=0.42.0")
+_BITSANDBYTES_AVAILABLE = RequirementCache("bitsandbytes>=0.44.0")


 class BitsandbytesPrecision(Precision):
-    """Plugin for quantizing weights with `bitsandbytes <https://github.com/TimDettmers/bitsandbytes>`__.
+    """Plugin for quantizing weights with `bitsandbytes <https://github.com/bitsandbytes-foundation/bitsandbytes>`__.

     .. warning::  This is an :ref:`experimental <versioning:Experimental API>` feature.

@@ -322,6 +322,7 @@ def quantize_(self, weight: Optional[torch.Tensor] = None, device: Optional[torc
             return
         assert isinstance(self.weight, bnb.nn.Params4bit)
         self.weight = self.quantize(self.weight, weight, device)
+        self.weight.bnb_quantized = True

     @staticmethod
     def quantize(
diff --git a/src/lightning/pytorch/plugins/precision/bitsandbytes.py b/src/lightning/pytorch/plugins/precision/bitsandbytes.py
index 62acc7bf77c8d..3a2daa828bc3c 100644
--- a/src/lightning/pytorch/plugins/precision/bitsandbytes.py
+++ b/src/lightning/pytorch/plugins/precision/bitsandbytes.py
@@ -16,7 +16,7 @@

 class BitsandbytesPrecision(Precision, FabricBNBPrecision):
-    """Plugin for quantizing weights with `bitsandbytes <https://github.com/TimDettmers/bitsandbytes>`__.
+    """Plugin for quantizing weights with `bitsandbytes <https://github.com/bitsandbytes-foundation/bitsandbytes>`__.

     .. warning::  This is an :ref:`experimental <versioning:Experimental API>` feature.
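
Why PATCH 2/5 sets `bnb_quantized` by hand: newer bitsandbytes versions track on `Params4bit` whether the underlying storage has already been quantized, and quantize it (again) during a `.to(device)`/`.cuda()` move when the flag is unset. Since the plugin quantizes through `bnb.functional.quantize_4bit` and rebuilds the parameter itself rather than going through `.to()`, the flag must be set explicitly. A minimal sketch of that flow, assuming bitsandbytes >= 0.44 and a CUDA device (the shapes and quant_type are illustrative, not from the patch):

    import torch
    import bitsandbytes as bnb

    w = torch.randn(64, 64, dtype=torch.half, device="cuda")

    # Quantize by hand, as the plugin's quantize() staticmethod does:
    w_4bit, quant_state = bnb.functional.quantize_4bit(w, quant_type="nf4")

    # Rebuild the parameter around the quantized storage, as _replace_param() does:
    param = bnb.nn.Params4bit(w_4bit, requires_grad=False, quant_state=quant_state, quant_type="nf4")

    # Without this flag, a later .to("cuda") would treat the uint8 storage as raw
    # weights and quantize it a second time:
    param.bnb_quantized = True
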
From 17273b4bf65d4eaf1476e312dd12e7c6f0d55b12 Mon Sep 17 00:00:00 2001
From: Ali Alshaarawy
Date: Wed, 2 Oct 2024 02:32:30 +0000
Subject: [PATCH 3/5] add quant_storage param

---
 .../fabric/plugins/precision/bitsandbytes.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/src/lightning/fabric/plugins/precision/bitsandbytes.py b/src/lightning/fabric/plugins/precision/bitsandbytes.py
index bd8ba8733c3ad..83dfcf28a7f89 100644
--- a/src/lightning/fabric/plugins/precision/bitsandbytes.py
+++ b/src/lightning/fabric/plugins/precision/bitsandbytes.py
@@ -184,11 +184,15 @@ def _replace_param(
     if param.device.type == "meta":
         if isinstance(param, bnb.nn.Params4bit):
             return bnb.nn.Params4bit(
-                data,
-                requires_grad=data.requires_grad,
-                quant_state=quant_state,
-                compress_statistics=param.compress_statistics,
-                quant_type=param.quant_type,
+                data = data,
+                requires_grad = data.requires_grad,
+                quant_state = quant_state,
+                blocksize = param.blocksize,
+                compress_statistics = param.compress_statistics,
+                quant_type = param.quant_type,
+                quant_storage = param.quant_storage,
+                module = param.module,
+                bnb_quantized = param.bnb_quantized
             )
         return torch.nn.Parameter(data, requires_grad=data.requires_grad)
     param.data = data
@@ -338,6 +342,7 @@
             blocksize=params4bit.blocksize,
             compress_statistics=params4bit.compress_statistics,
             quant_type=params4bit.quant_type,
+            quant_storage=params4bit.quant_storage,
         )
         return _replace_param(params4bit, w_4bit, quant_state)

From 8fba466ad4a2c5f426b976ee514b2a345502aa9d Mon Sep 17 00:00:00 2001
From: Ali Alshaarawy
Date: Wed, 2 Oct 2024 13:11:23 +0000
Subject: [PATCH 4/5] exclude macos from bnb upgrade

---
 requirements/fabric/strategies.txt                      | 3 ++-
 requirements/pytorch/extra.txt                          | 3 ++-
 src/lightning/fabric/plugins/precision/bitsandbytes.py  | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/requirements/fabric/strategies.txt b/requirements/fabric/strategies.txt
index 2c676efa4ed0f..394aceb39cd6b 100644
--- a/requirements/fabric/strategies.txt
+++ b/requirements/fabric/strategies.txt
@@ -6,4 +6,5 @@
 # note: is a bug around 0.10 with `MPS_Accelerator must implement all abstract methods`
 # shall be resolved by https://github.com/microsoft/DeepSpeed/issues/4372
 deepspeed >=0.8.2, <=0.9.3; platform_system != "Windows" and platform_system != "Darwin"  # strict
-bitsandbytes >=0.44.0,<0.44.2
+bitsandbytes >=0.44.0,<0.44.2; sys_platform == 'linux' or sys_platform == 'win32'
+bitsandbytes >=0.42.0,<0.43.0 ; sys_platform == 'darwin'
diff --git a/requirements/pytorch/extra.txt b/requirements/pytorch/extra.txt
index 1b75a3ecaa1de..12bbdf5a70ab0 100644
--- a/requirements/pytorch/extra.txt
+++ b/requirements/pytorch/extra.txt
@@ -8,4 +8,5 @@ hydra-core >=1.2.0, <1.4.0
 jsonargparse[signatures] >=4.27.7, <4.28.0
 rich >=12.3.0, <13.6.0
 tensorboardX >=2.2, <2.7.0  # min version is set by torch.onnx missing attribute
-bitsandbytes >=0.44.0,<0.44.2
+bitsandbytes >=0.44.0,<0.44.2; sys_platform == 'linux' or sys_platform == 'win32'
+bitsandbytes >=0.42.0,<0.43.0 ; sys_platform == 'darwin'
diff --git a/src/lightning/fabric/plugins/precision/bitsandbytes.py b/src/lightning/fabric/plugins/precision/bitsandbytes.py
index 83dfcf28a7f89..4c31e5c7e5d28 100644
--- a/src/lightning/fabric/plugins/precision/bitsandbytes.py
+++ b/src/lightning/fabric/plugins/precision/bitsandbytes.py
@@ -39,7 +39,7 @@
 log = logging.getLogger(__name__)

-_BITSANDBYTES_AVAILABLE = RequirementCache("bitsandbytes>=0.44.0")
+_BITSANDBYTES_AVAILABLE = RequirementCache("bitsandbytes>=0.42.0")


 class BitsandbytesPrecision(Precision):

From 06ff58087c2dc98e647c2adff22aae55cd7be3af Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 2 Oct 2024 13:55:01 +0000
Subject: [PATCH 5/5] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../fabric/plugins/precision/bitsandbytes.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/lightning/fabric/plugins/precision/bitsandbytes.py b/src/lightning/fabric/plugins/precision/bitsandbytes.py
index 4c31e5c7e5d28..394415452890a 100644
--- a/src/lightning/fabric/plugins/precision/bitsandbytes.py
+++ b/src/lightning/fabric/plugins/precision/bitsandbytes.py
@@ -184,15 +184,15 @@ def _replace_param(
     if param.device.type == "meta":
         if isinstance(param, bnb.nn.Params4bit):
             return bnb.nn.Params4bit(
-                data = data,
-                requires_grad = data.requires_grad,
-                quant_state = quant_state,
-                blocksize = param.blocksize,
-                compress_statistics = param.compress_statistics,
-                quant_type = param.quant_type,
-                quant_storage = param.quant_storage,
-                module = param.module,
-                bnb_quantized = param.bnb_quantized
+                data=data,
+                requires_grad=data.requires_grad,
+                quant_state=quant_state,
+                blocksize=param.blocksize,
+                compress_statistics=param.compress_statistics,
+                quant_type=param.quant_type,
+                quant_storage=param.quant_storage,
+                module=param.module,
+                bnb_quantized=param.bnb_quantized,
             )
         return torch.nn.Parameter(data, requires_grad=data.requires_grad)
     param.data = data