
Commit 503ad2e

Merge pull request #90 from kozistr/docs/docstring
[Refactor/Docs] Organize Class docstring & Add custom exceptions
2 parents 07dd145 + 6c7968c commit 503ad2e

37 files changed (+740, -589 lines)

README.rst

Lines changed: 16 additions & 0 deletions
@@ -303,6 +303,22 @@ Citations
 
 `Adai <https://github.com/zeke-xie/adaptive-inertia-adai#citing>`__
 
+Citation
+--------
+
+Please cite the original authors of the optimization algorithms. If you use this software, please cite it as below,
+or get the citation from the "cite this repository" button.
+
+::
+
+    @software{Kim_pytorch_optimizer_Bunch_of_2022,
+        author = {Kim, Hyeongchan},
+        month = {1},
+        title = {{pytorch_optimizer: Bunch of optimizer implementations in PyTorch with clean-code, strict types}},
+        version = {1.0.0},
+        year = {2022}
+    }
+
 Author
 ------

docs/api.rst

Lines changed: 194 additions & 0 deletions
@@ -0,0 +1,194 @@
+Implemented Optimizers
+======================
+
+.. _AdaBelief:
+
+AdaBelief
+---------
+
+.. autoclass:: pytorch_optimizer.AdaBelief
+    :members:
+
+.. _AdaBound:
+
+AdaBound
+--------
+
+.. autoclass:: pytorch_optimizer.AdaBound
+    :members:
+
+.. _Adai:
+
+Adai
+----
+
+.. autoclass:: pytorch_optimizer.Adai
+    :members:
+
+.. _AdamP:
+
+AdamP
+-----
+
+.. autoclass:: pytorch_optimizer.AdamP
+    :members:
+
+.. _Adan:
+
+Adan
+----
+
+.. autoclass:: pytorch_optimizer.Adan
+    :members:
+
+.. _AdaPNM:
+
+AdaPNM
+------
+
+.. autoclass:: pytorch_optimizer.AdaPNM
+    :members:
+
+.. _AGC:
+
+AGC
+---
+
+.. autoclass:: pytorch_optimizer.AGC
+    :members:
+
+.. _diffGrad:
+
+diffGrad
+--------
+
+.. autoclass:: pytorch_optimizer.DiffGrad
+    :members:
+
+.. _diffRGrad:
+
+diffRGrad
+---------
+
+.. autoclass:: pytorch_optimizer.DiffRGrad
+    :members:
+
+.. _GC:
+
+GC
+--
+
+.. autoclass:: pytorch_optimizer.centralize_gradient
+    :members:
+
+.. _Lamb:
+
+Lamb
+----
+
+.. autoclass:: pytorch_optimizer.Lamb
+    :members:
+
+.. _LARS:
+
+LARS
+----
+
+.. autoclass:: pytorch_optimizer.LARS
+    :members:
+
+.. _Lookahead:
+
+Lookahead
+---------
+
+.. autoclass:: pytorch_optimizer.Lookahead
+    :members:
+
+.. _MADGRAD:
+
+MADGRAD
+-------
+
+.. autoclass:: pytorch_optimizer.MADGRAD
+    :members:
+
+.. _Nero:
+
+Nero
+----
+
+.. autoclass:: pytorch_optimizer.Nero
+    :members:
+
+.. _PCGrad:
+
+PCGrad
+------
+
+.. autoclass:: pytorch_optimizer.PCGrad
+    :members:
+
+.. _PNM:
+
+PNM
+---
+
+.. autoclass:: pytorch_optimizer.PNM
+    :members:
+
+.. _RAdam:
+
+RAdam
+-----
+
+.. autoclass:: pytorch_optimizer.RAdam
+    :members:
+
+.. _RaLamb:
+
+RaLamb
+------
+
+.. autoclass:: pytorch_optimizer.RaLamb
+    :members:
+
+.. _Ranger:
+
+Ranger
+------
+
+.. autoclass:: pytorch_optimizer.Ranger
+    :members:
+
+.. _Ranger21:
+
+Ranger21
+--------
+
+.. autoclass:: pytorch_optimizer.Ranger21
+    :members:
+
+.. _SAM:
+
+SAM
+---
+
+.. autoclass:: pytorch_optimizer.SAM
+    :members:
+
+.. _SGDP:
+
+SGDP
+----
+
+.. autoclass:: pytorch_optimizer.SGDP
+    :members:
+
+.. _Shampoo:
+
+Shampoo
+-------
+
+.. autoclass:: pytorch_optimizer.Shampoo
+    :members:
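For reference, every class documented above follows the standard torch.optim optimizer interface, so a minimal usage sketch for one of them looks roughly like the code below (the class choice, toy model, and learning rate are illustrative assumptions, not taken from this commit):

    import torch

    from pytorch_optimizer import AdaBelief

    model = torch.nn.Linear(10, 2)
    optimizer = AdaBelief(model.parameters(), lr=1e-3)  # torch.optim-style constructor

    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()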

docs/conf.py

Lines changed: 31 additions & 4 deletions
@@ -12,17 +12,18 @@
 #
 # import os
 # import sys
-# sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('../'))
 
+import sphinx_rtd_theme
 
 # -- Project information -----------------------------------------------------
 
 project = 'pytorch-optimizers'
-copyright = '2021, kozistr'
+copyright = '2023, kozistr'
 author = 'kozistr'
 
 # The full version, including alpha/beta/rc tags
-release = '0.0.6'
+release = '2.1.0'
 
 
 # -- General configuration ---------------------------------------------------
@@ -31,6 +32,16 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
+    'sphinx_rtd_theme',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.napoleon',
+    'sphinx.ext.doctest',
+    'sphinx.ext.todo',
+    'sphinx.ext.coverage',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.ifconfig',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.intersphinx',
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -47,9 +58,25 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'alabaster'
+html_theme = 'sphinx_rtd_theme'
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
 html_static_path = ['_static']
+
+html_theme_options = {
+    'analytics_anonymize_ip': False,
+    'logo_only': False,
+    'display_version': True,
+    'prev_next_buttons_location': 'bottom',
+    'style_external_links': False,
+    'vcs_pageview_mode': '',
+    'style_nav_header_background': 'white',
+    # Toc options
+    'collapse_navigation': True,
+    'sticky_navigation': True,
+    'navigation_depth': 4,
+    'includehidden': True,
+    'titles_only': False,
+}

docs/index.rst

Lines changed: 19 additions & 0 deletions
@@ -7,3 +7,22 @@ Welcome to pytorch-optimizers's documentation!
 ==============================================
 
 .. include:: ../README.rst
+
+.. toctree::
+    :maxdepth: 2
+    :caption: Contents:
+
+Contents
+--------
+
+.. toctree::
+    :maxdepth: 2
+
+    api
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`

hubconf.py

Lines changed: 2 additions & 8 deletions
@@ -1,9 +1,3 @@
-"""
-PyTorch Hub models
-Usage:
-    import torch
-    optimizer = torch.hub.load('kozistr/pytorch_optimizer', 'adamp')
-"""
 from functools import partial as _partial
 from functools import update_wrapper as _update_wrapper
 
@@ -17,13 +11,13 @@
 for _optimizer in _get_supported_optimizers():
     name: str = _optimizer.__name__
     _func = _partial(_load_optimizer, optimizer=name)
-    _update_wrapper(_func, _optimizer.__init__)
+    _update_wrapper(_func, _optimizer)
     for n in (name, name.lower(), name.upper()):
         globals()[n] = _func
 
 for _scheduler in _get_supported_lr_schedulers():
     name: str = _scheduler.__name__
     _func = _partial(_load_lr_scheduler, lr_scheduler=name)
-    _update_wrapper(_func, _scheduler.__init__)
+    _update_wrapper(_func, _scheduler)
     for n in (name, name.lower(), name.upper()):
         globals()[n] = _func
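The change from _optimizer.__init__ to _optimizer means update_wrapper now copies the class-level metadata (name, docstring) onto each generated hub entry point. The deleted module docstring described the intended usage of those entry points; a sketch of that pattern, taken from the docstring (whether the entry point returns the optimizer class itself or a wrapper depends on _load_optimizer, which is outside this hunk):

    import torch

    # each supported optimizer / lr-scheduler name is exposed as a torch.hub entry point
    optimizer = torch.hub.load('kozistr/pytorch_optimizer', 'adamp')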

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pytorch_optimizer"
-version = "2.1.0"
+version = "2.1.1"
 description = "Bunch of optimizer implementations in PyTorch with clean-code, strict types. Also, including useful optimization ideas."
 license = "Apache-2.0"
 authors = ["kozistr <[email protected]>"]
Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+class NoSparseGradientError(Exception):
+    """Raised when the gradient is a sparse gradient.
+
+    :param optimizer_name: str. optimizer name.
+    :param note: str. special conditions to note (default '').
+    """
+
+    def __init__(self, optimizer_name: str, note: str = ''):
+        self.note: str = ' ' if note == '' else f' w/ {note} '
+        self.message: str = f'[-] {optimizer_name}{self.note}does not support sparse gradient.'
+        super().__init__(self.message)
+
+
+class ZeroParameterSizeError(Exception):
+    """Raised when the parameter size is 0."""
+
+    def __init__(self):
+        self.message: str = '[-] parameter size is 0'
+        super().__init__(self.message)
+
+
+class NoClosureError(Exception):
+    """Raised when there is no closure function."""
+
+    def __init__(self, optimizer_name: str):
+        self.message: str = f'[-] {optimizer_name} requires closure.'
+        super().__init__(self.message)
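For illustration only, a sketch of how optimizer code might raise these exceptions; the import path is assumed, since the new file's path is not visible in this capture of the diff:

    import torch

    from pytorch_optimizer import NoSparseGradientError  # import path assumed, not shown in this diff


    def validate_gradient(grad: torch.Tensor, optimizer_name: str) -> None:
        # optimizers that keep dense state buffers cannot consume sparse gradients
        if grad.is_sparse:
            raise NoSparseGradientError(optimizer_name)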

pytorch_optimizer/lr_scheduler/chebyshev.py

Lines changed: 4 additions & 1 deletion
@@ -2,14 +2,17 @@
 
 
 def chebyshev_steps(small_m: float, big_m: float, num_epochs: int) -> np.ndarray:
-    """
+    """chebyshev_steps
+
     :param small_m: float. stands for 'm' notation.
     :param big_m: float. stands for 'M' notation.
    :param num_epochs: int. stands for 'T' notation.
     :return: np.array. chebyshev_steps
     """
+
     c, r = (big_m + small_m) / 2.0, (big_m - small_m) / 2.0
     thetas = (np.arange(num_epochs) + 0.5) / num_epochs * np.pi
+
     return 1.0 / (c - r * np.cos(thetas))
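As a quick sanity check of the formula above (illustrative numbers, not part of the commit): with small_m=0.05, big_m=1.0, and num_epochs=4, the centre/radius pair is c=0.525, r=0.475, and the schedule evaluates to roughly [11.61, 2.91, 1.41, 1.04].

    import numpy as np

    c, r = (1.0 + 0.05) / 2.0, (1.0 - 0.05) / 2.0  # (M + m) / 2, (M - m) / 2
    thetas = (np.arange(4) + 0.5) / 4 * np.pi      # Chebyshev nodes on (0, pi)
    print(1.0 / (c - r * np.cos(thetas)))          # ~[11.61, 2.91, 1.41, 1.04]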
