
Commit 1f5acaa

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 146bbaa commit 1f5acaa

2 files changed: +28 additions, -18 deletions


src/lightning/pytorch/callbacks/differential_privacy.py

Lines changed: 21 additions & 12 deletions
@@ -52,7 +52,7 @@ def _set_layer_param(
     param: torch.Tensor,
     layer_name: str,
 ) -> None:
-    """Helper"""
+    """Helper."""
     layer = getattr(dpgrucell, layer_name)
     if "weight" in name:
         layer.weight = torch.nn.Parameter(deepcopy(param))
@@ -94,7 +94,7 @@ def params(
 
 
 class DPOptimizer(OpacusDPOptimizer):
-    """Brainiac-2's DP-Optimizer"""
+    """Brainiac-2's DP-Optimizer."""
 
     def __init__(
         self,
@@ -108,21 +108,20 @@ def __init__(
 
     @property
     def params(self) -> list[torch.nn.Parameter]:
-        """
-        Returns a flat list of ``nn.Parameter`` managed by the optimizer
-        """
+        """Returns a flat list of ``nn.Parameter`` managed by the optimizer."""
         return params(self, self.param_group_names)
 
 
 class DifferentialPrivacy(pl.callbacks.EarlyStopping):
-    """Enables differential privacy using Opacus.
-    Converts optimizers to instances of the :class:`~opacus.optimizers.DPOptimizer` class.
-    This callback inherits from `EarlyStopping`, thus it is also able to stop the
-    training when enough privacy budget has been spent.
-    Please beware that Opacus does not support multi-optimizer training.
+    """Enables differential privacy using Opacus. Converts optimizers to instances of the
+    :class:`~opacus.optimizers.DPOptimizer` class. This callback inherits from `EarlyStopping`, thus it is also able to
+    stop the training when enough privacy budget has been spent. Please beware that Opacus does not support multi-
+    optimizer training.
+
     For more info, check the following links:
     * https://opacus.ai/tutorials/
     * https://blog.openmined.org/differentially-private-deep-learning-using-opacus-in-20-lines-of-code/
+
     """
 
     def __init__(
@@ -140,6 +139,7 @@ def __init__(
         **gsm_kwargs: ty.Any,
     ) -> None:
         """Enables differential privacy using Opacus.
+
         Converts optimizers to instances of the :class:`~opacus.optimizers.DPOptimizer` class.
         This callback inherits from `EarlyStopping`,
         thus it is also able to stop the training when enough privacy budget has been spent.
@@ -177,6 +177,7 @@ def __init__(
                 Whether to make the dataloader private. Defaults to False.
             **gsm_kwargs:
                 Input arguments for the :class:`~opacus.GradSampleModule` class.
+
         """
         # inputs
         self.budget = budget
@@ -227,7 +228,11 @@ def on_train_epoch_start(
         trainer: pl.Trainer,
         pl_module: pl.LightningModule,
     ) -> None:
-        """Called when the training epoch begins. Use this to make optimizers private."""
+        """Called when the training epoch begins.
+
+        Use this to make optimizers private.
+
+        """
         # idx
         if self.idx is None:
             self.idx = range(len(trainer.optimizers))
@@ -289,7 +294,11 @@ def on_train_batch_end( # pylint: disable=unused-argument # type: ignore
         batch_idx: int,
         *args: ty.Any,
     ) -> None:
-        """Called after the batched has been digested. Use this to understand whether to stop or not."""
+        """Called after the batched has been digested.
+
+        Use this to understand whether to stop or not.
+
+        """
         self._log_and_stop_criterion(trainer, pl_module)
 
     def on_train_epoch_end(
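
For orientation only, a minimal usage sketch of the callback edited above, assuming the import path implied by the file location and treating `budget` as the target epsilon (the diff only shows `self.budget = budget`); nothing below is part of this commit:

import lightning.pytorch as pl

# Import path assumed from src/lightning/pytorch/callbacks/differential_privacy.py.
from lightning.pytorch.callbacks.differential_privacy import DifferentialPrivacy

# The callback behaves like EarlyStopping, but keyed on the spent privacy budget.
dp_cb = DifferentialPrivacy(budget=1.0)  # hypothetical budget value

trainer = pl.Trainer(max_steps=1000, callbacks=[dp_cb])
# trainer.fit(model, datamodule=datamodule)  # model and datamodule are user-supplied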

tests/tests_pytorch/callbacks/test_dp.py

Lines changed: 7 additions & 6 deletions
@@ -68,6 +68,7 @@ def test_privacy_callback() -> None:
     * the privacy budget has been spent (`epsilon > 0`);
     * spent budget is greater than max privacy budget;
     * traininng did not stop because `max_steps` has been reached, but because the total budget has been spent.
+
     """
     # choose dataset
     datamodule = MockDataModule()
@@ -92,12 +93,12 @@ def test_privacy_callback() -> None:
     epsilon, best_alpha = dp_cb.get_privacy_spent()
     print(f"Total spent budget {epsilon} with alpha: {best_alpha}")
     assert epsilon > 0, f"No privacy budget has been spent: {epsilon}"
-    assert (
-        epsilon >= dp_cb.budget
-    ), f"Spent budget is not greater than max privacy budget: epsilon = {epsilon} and budget = {dp_cb.budget}"
-    assert (
-        trainer.global_step < max_steps
-    ), "Traininng stopped because max_steps has been reached, not because the total budget has been spent."
+    assert epsilon >= dp_cb.budget, (
+        f"Spent budget is not greater than max privacy budget: epsilon = {epsilon} and budget = {dp_cb.budget}"
+    )
+    assert trainer.global_step < max_steps, (
+        "Traininng stopped because max_steps has been reached, not because the total budget has been spent."
+    )
 
 
 if __name__ == "__main__":
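
The reflowed assertions above capture the intended behaviour: training stops because the spent budget `epsilon` reaches `dp_cb.budget`, not because `max_steps` runs out. A rough sketch of such a stop criterion, using only names visible in this diff; the real body of `_log_and_stop_criterion` is not shown here and may differ:

def _log_and_stop_criterion(self, trainer, pl_module) -> None:
    # Illustrative only: compare the spent budget against the configured target.
    epsilon, best_alpha = self.get_privacy_spent()
    pl_module.log("epsilon", epsilon)  # log the current spend
    if epsilon >= self.budget:
        trainer.should_stop = True  # same mechanism EarlyStopping uses to halt training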
