1 parent 522ca7b commit e92b73d
pytorch_optimizer/adamp.py
@@ -38,9 +38,9 @@ def __init__(
         adamd_debias_term: bool = False,
         eps: float = 1e-8,
     ):
-        """
+        """AdamP optimizer
         :param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
-        :param lr: float. learning rate.
+        :param lr: float. learning rate
         :param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace
         :param weight_decay: float. weight decay (L2 penalty)
         :param delta: float. threshold that determines whether a set of parameters is scale invariant or not
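
For context, a minimal sketch of how the optimizer documented in this diff might be used. The import path `pytorch_optimizer.adamp.AdamP` is inferred from the file touched by this commit, and all argument values are illustrative rather than repository defaults; only the parameter names shown in the docstring above are used.

```python
import torch

# Import path inferred from the file touched by this commit (pytorch_optimizer/adamp.py);
# adjust if the package re-exports AdamP at the top level.
from pytorch_optimizer.adamp import AdamP

# Toy model so the optimizer has parameters to update.
model = torch.nn.Linear(10, 1)

# Keyword arguments mirror the docstring in the diff; values here are illustrative.
optimizer = AdamP(
    model.parameters(),        # params: iterable of parameters or dicts defining param groups
    lr=1e-3,                   # learning rate
    betas=(0.9, 0.999),        # coefficients for the running averages
    weight_decay=1e-2,         # L2 penalty
    delta=0.1,                 # scale-invariance threshold
    eps=1e-8,                  # numerical stability term
    adamd_debias_term=False,   # AdamD-style debias toggle
)

# One optimization step on a dummy batch.
x = torch.randn(4, 10)
loss = model(x).pow(2).mean()

optimizer.zero_grad()
loss.backward()
optimizer.step()
```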