@@ -52,7 +52,7 @@ def _set_layer_param(
    param: torch.Tensor,
    layer_name: str,
) -> None:
-    """Helper"""
+    """Helper."""
    layer = getattr(dpgrucell, layer_name)
    if "weight" in name:
        layer.weight = torch.nn.Parameter(deepcopy(param))
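For context, a minimal sketch of the whole helper. The `dpgrucell` and `name` arguments are inferred from the body above, and the bias branch is an assumption, not code from this commit:

# Sketch only: the first two arguments and the bias branch are inferred
# from the visible lines of the hunk, not shown in this diff.
from copy import deepcopy

import torch

def _set_layer_param(
    dpgrucell: torch.nn.Module,  # assumed first argument, used via getattr below
    name: str,                   # assumed second argument, matched against "weight"
    param: torch.Tensor,
    layer_name: str,
) -> None:
    """Helper."""
    layer = getattr(dpgrucell, layer_name)
    if "weight" in name:
        layer.weight = torch.nn.Parameter(deepcopy(param))
    else:  # assumed: any parameter that is not a weight is a bias
        layer.bias = torch.nn.Parameter(deepcopy(param))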
@@ -94,7 +94,7 @@ def params(


class DPOptimizer(OpacusDPOptimizer):
-    """Brainiac-2's DP-Optimizer"""
+    """Brainiac-2's DP-Optimizer."""

    def __init__(
        self,
@@ -108,21 +108,20 @@ def __init__(

    @property
    def params(self) -> list[torch.nn.Parameter]:
-        """
-        Returns a flat list of ``nn.Parameter`` managed by the optimizer
-        """
+        """Returns a flat list of ``nn.Parameter`` managed by the optimizer."""
        return params(self, self.param_group_names)


class DifferentialPrivacy(pl.callbacks.EarlyStopping):
-    """Enables differential privacy using Opacus.
-    Converts optimizers to instances of the :class:`~opacus.optimizers.DPOptimizer` class.
-    This callback inherits from `EarlyStopping`, thus it is also able to stop the
-    training when enough privacy budget has been spent.
-    Please beware that Opacus does not support multi-optimizer training.
+    """Enables differential privacy using Opacus. Converts optimizers to instances of the
+    :class:`~opacus.optimizers.DPOptimizer` class. This callback inherits from `EarlyStopping`, thus it is also
+    able to stop the training when enough privacy budget has been spent. Please beware that Opacus does not
+    support multi-optimizer training.
+
    For more info, check the following links:
    * https://opacus.ai/tutorials/
    * https://blog.openmined.org/differentially-private-deep-learning-using-opacus-in-20-lines-of-code/
+
    """

    def __init__(
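A minimal usage sketch of this callback: `budget` and `private_dataloader` are constructor arguments documented later in this diff, while their values, the model, and the datamodule here are placeholders:

import pytorch_lightning as pl

# Hypothetical values; only the argument names `budget` and
# `private_dataloader` appear in this diff.
dp = DifferentialPrivacy(budget=3.0, private_dataloader=False)
trainer = pl.Trainer(max_epochs=20, callbacks=[dp])
# trainer.fit(model, datamodule=datamodule)  # stops early once the budget is spent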
@@ -140,6 +139,7 @@ def __init__(
        **gsm_kwargs: ty.Any,
    ) -> None:
        """Enables differential privacy using Opacus.
+
        Converts optimizers to instances of the :class:`~opacus.optimizers.DPOptimizer` class.
        This callback inherits from `EarlyStopping`,
        thus it is also able to stop the training when enough privacy budget has been spent.
@@ -177,6 +177,7 @@ def __init__(
                Whether to make the dataloader private. Defaults to False.
            **gsm_kwargs:
                Input arguments for the :class:`~opacus.GradSampleModule` class.
+
        """
        # inputs
        self.budget = budget
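Per the docstring above, `**gsm_kwargs` are input arguments for Opacus's `GradSampleModule`, which wraps the model to collect per-sample gradients. A hedged illustration of that wrapping; the exact call site is not part of this diff:

from opacus import GradSampleModule

# batch_first and loss_reduction are standard GradSampleModule arguments;
# that they are the ones passed through **gsm_kwargs is an assumption.
gsm_model = GradSampleModule(model, batch_first=True, loss_reduction="mean")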
@@ -227,7 +228,11 @@ def on_train_epoch_start(
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
    ) -> None:
-        """Called when the training epoch begins. Use this to make optimizers private."""
+        """Called when the training epoch begins.
+
+        Use this to make optimizers private.
+
+        """
        # idx
        if self.idx is None:
            self.idx = range(len(trainer.optimizers))
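With Opacus, "making an optimizer private" typically means wrapping the plain optimizer in a `DPOptimizer`. The callback's actual wrapping code sits outside this hunk, so the following is a sketch with assumed hyperparameter values:

from opacus.optimizers import DPOptimizer as _DPOptimizer

def _privatize(optimizer, expected_batch_size):
    # Idempotent on purpose (assumed): this would run at every epoch start,
    # so an already-private optimizer is returned unchanged.
    if isinstance(optimizer, _DPOptimizer):
        return optimizer
    return _DPOptimizer(
        optimizer,
        noise_multiplier=1.0,   # assumed value
        max_grad_norm=1.0,      # assumed value
        expected_batch_size=expected_batch_size,
    )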
@@ -289,7 +294,11 @@ def on_train_batch_end(  # pylint: disable=unused-argument # type: ignore
        batch_idx: int,
        *args: ty.Any,
    ) -> None:
-        """Called after the batched has been digested. Use this to understand whether to stop or not."""
+        """Called after the batch has been digested.
+
+        Use this to decide whether to stop or not.
+
+        """
        self._log_and_stop_criterion(trainer, pl_module)

    def on_train_epoch_end(
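`_log_and_stop_criterion` is not shown in this diff. A plausible budget-based criterion, sketched under the assumption that the callback holds an Opacus accountant and a target `delta`, could look like:

def _log_and_stop_criterion(self, trainer, pl_module):
    # Assumed attributes: self.accountant (an Opacus accountant) and self.delta;
    # self.budget is set in __init__ (see above).
    spent = self.accountant.get_epsilon(delta=self.delta)
    pl_module.log("privacy/epsilon", spent, prog_bar=True)
    if spent >= self.budget:
        trainer.should_stop = True  # EarlyStopping-style shutdown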