Skip to content

Commit df71d49

Browse files
authored
Merge branch 'master' into stepwise-lr-scheduler
2 parents fd1883f + 5dea36c commit df71d49

File tree

5 files changed

+17
-5
lines changed

5 files changed

+17
-5
lines changed

docs/source-pytorch/common/progress_bar.rst

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,10 @@ You can update ``refresh_rate`` (rate (number of batches) at which the progress
3636
3737
trainer = Trainer(callbacks=[TQDMProgressBar(refresh_rate=10)])
3838
39+
.. note::
40+
41+
The ``smoothing`` option has no effect when using the default implementation of :class:`~lightning.pytorch.callbacks.TQDMProgressBar`, as the progress bar is updated using the ``bar.refresh()`` method instead of ``bar.update()``. This can cause the progress bar to become desynchronized with the actual progress. To avoid this issue, you can use the ``bar.update()`` method instead, but this may require customizing the :class:`~lightning.pytorch.callbacks.TQDMProgressBar` class.
42+
3943
By default the training progress bar is reset (overwritten) at each new epoch.
4044
If you wish for a new progress bar to be displayed at the end of every epoch, set
4145
:paramref:`TQDMProgressBar.leave <lightning.pytorch.callbacks.TQDMProgressBar.leave>` to ``True``.

requirements/fabric/strategies.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,5 @@
66
# note: is a bug around 0.10 with `MPS_Accelerator must implement all abstract methods`
77
# shall be resolved by https://github.com/microsoft/DeepSpeed/issues/4372
88
deepspeed >=0.8.2, <=0.9.3; platform_system != "Windows" and platform_system != "Darwin" # strict
9-
bitsandbytes >=0.42.0,<0.43.0
9+
bitsandbytes >=0.44.0,<0.44.2; sys_platform == 'linux' or sys_platform == 'win32'
10+
bitsandbytes >=0.42.0,<0.43.0 ; sys_platform == 'darwin'

requirements/pytorch/extra.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,4 +8,5 @@ hydra-core >=1.2.0, <1.4.0
88
jsonargparse[signatures] >=4.27.7, <4.28.0
99
rich >=12.3.0, <13.6.0
1010
tensorboardX >=2.2, <2.7.0 # min version is set by torch.onnx missing attribute
11-
bitsandbytes >=0.42.0,<0.43.0
11+
bitsandbytes >=0.44.0,<0.44.2; sys_platform == 'linux' or sys_platform == 'win32'
12+
bitsandbytes >=0.42.0,<0.43.0 ; sys_platform == 'darwin'

src/lightning/fabric/plugins/precision/bitsandbytes.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@
4343

4444

4545
class BitsandbytesPrecision(Precision):
46-
"""Plugin for quantizing weights with `bitsandbytes <https://github.com/TimDettmers/bitsandbytes>`__.
46+
"""Plugin for quantizing weights with `bitsandbytes <https://github.com/bitsandbytes-foundation/bitsandbytes>`__.
4747
4848
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
4949
@@ -184,11 +184,15 @@ def _replace_param(
184184
if param.device.type == "meta":
185185
if isinstance(param, bnb.nn.Params4bit):
186186
return bnb.nn.Params4bit(
187-
data,
187+
data=data,
188188
requires_grad=data.requires_grad,
189189
quant_state=quant_state,
190+
blocksize=param.blocksize,
190191
compress_statistics=param.compress_statistics,
191192
quant_type=param.quant_type,
193+
quant_storage=param.quant_storage,
194+
module=param.module,
195+
bnb_quantized=param.bnb_quantized,
192196
)
193197
return torch.nn.Parameter(data, requires_grad=data.requires_grad)
194198
param.data = data
@@ -322,6 +326,7 @@ def quantize_(self, weight: Optional[torch.Tensor] = None, device: Optional[torc
322326
return
323327
assert isinstance(self.weight, bnb.nn.Params4bit)
324328
self.weight = self.quantize(self.weight, weight, device)
329+
self.weight.bnb_quantized = True
325330

326331
@staticmethod
327332
def quantize(
@@ -337,6 +342,7 @@ def quantize(
337342
blocksize=params4bit.blocksize,
338343
compress_statistics=params4bit.compress_statistics,
339344
quant_type=params4bit.quant_type,
345+
quant_storage=params4bit.quant_storage,
340346
)
341347
return _replace_param(params4bit, w_4bit, quant_state)
342348

src/lightning/pytorch/plugins/precision/bitsandbytes.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717

1818
class BitsandbytesPrecision(Precision, FabricBNBPrecision):
19-
"""Plugin for quantizing weights with `bitsandbytes <https://github.com/TimDettmers/bitsandbytes>`__.
19+
"""Plugin for quantizing weights with `bitsandbytes <https://github.com/bitsandbytes-foundation/bitsandbytes>`__.
2020
2121
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
2222

0 commit comments

Comments (0)