Merge pull request #90 from kozistr/docs/docstring
[Refactor/Docs] Organize Class docstring & Add custom exceptions
kozistr committed Jan 2, 2023
2 parents 07dd145 + 6c7968c commit 503ad2e
Showing 37 changed files with 740 additions and 589 deletions.
16 changes: 16 additions & 0 deletions README.rst
@@ -303,6 +303,22 @@ Citations

`Adai <https://github.com/zeke-xie/adaptive-inertia-adai#citing>`__

Citation
--------

Please cite the original authors of the optimization algorithms. If you use this software, please cite it as below,
or use the "Cite this repository" button on GitHub.

::

@software{Kim_pytorch_optimizer_Bunch_of_2022,
author = {Kim, Hyeongchan},
month = {1},
title = {{pytorch_optimizer: Bunch of optimizer implementations in PyTorch with clean-code, strict types}},
version = {1.0.0},
year = {2022}
}

Author
------

194 changes: 194 additions & 0 deletions docs/api.rst
@@ -0,0 +1,194 @@
Implemented Optimizers
======================

.. _AdaBelief:

AdaBelief
---------

.. autoclass:: pytorch_optimizer.AdaBelief
:members:

.. _AdaBound:

AdaBound
--------

.. autoclass:: pytorch_optimizer.AdaBound
:members:

.. _Adai:

Adai
----

.. autoclass:: pytorch_optimizer.Adai
:members:

.. _AdamP:

AdamP
-----

.. autoclass:: pytorch_optimizer.AdamP
:members:

.. _Adan:

Adan
----

.. autoclass:: pytorch_optimizer.Adan
:members:

.. _AdaPNM:

AdaPNM
------

.. autoclass:: pytorch_optimizer.AdaPNM
:members:

.. _AGC:

AGC
---

.. autoclass:: pytorch_optimizer.AGC
:members:

.. _diffGrad:

diffGrad
--------

.. autoclass:: pytorch_optimizer.DiffGrad
:members:

.. _diffRGrad:

diffRGrad
---------

.. autoclass:: pytorch_optimizer.DiffRGrad
:members:

.. _GC:

GC
--

.. autoclass:: pytorch_optimizer.centralize_gradient
:members:

.. _Lamb:

Lamb
----

.. autoclass:: pytorch_optimizer.Lamb
:members:

.. _LARS:

LARS
----

.. autoclass:: pytorch_optimizer.LARS
:members:

.. _Lookahead:

Lookahead
---------

.. autoclass:: pytorch_optimizer.Lookahead
:members:

.. _MADGRAD:

MADGRAD
-------

.. autoclass:: pytorch_optimizer.MADGRAD
:members:

.. _Nero:

Nero
----

.. autoclass:: pytorch_optimizer.Nero
:members:

.. _PCGrad:

PCGrad
------

.. autoclass:: pytorch_optimizer.PCGrad
:members:

.. _PNM:

PNM
---

.. autoclass:: pytorch_optimizer.PNM
:members:

.. _RAdam:

RAdam
-----

.. autoclass:: pytorch_optimizer.RAdam
:members:

.. _RaLamb:

RaLamb
------

.. autoclass:: pytorch_optimizer.RaLamb
:members:

.. _Ranger:

Ranger
------

.. autoclass:: pytorch_optimizer.Ranger
:members:

.. _Ranger21:

Ranger21
--------

.. autoclass:: pytorch_optimizer.Ranger21
:members:

.. _SAM:

SAM
---

.. autoclass:: pytorch_optimizer.SAM
:members:

.. _SGDP:

SGDP
----

.. autoclass:: pytorch_optimizer.SGDP
:members:

.. _Shampoo:

Shampoo
-------

.. autoclass:: pytorch_optimizer.Shampoo
:members:
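
Every class documented above is importable directly from the ``pytorch_optimizer`` package. A minimal usage sketch, assuming ``AdaBelief`` keeps the usual ``(params, lr=...)`` constructor (any other optimizer on this page can be swapped in)::

    import torch
    from pytorch_optimizer import AdaBelief  # or any optimizer listed above

    model = torch.nn.Linear(10, 1)
    optimizer = AdaBelief(model.parameters(), lr=1e-3)

    # one toy training step
    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
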
35 changes: 31 additions & 4 deletions docs/conf.py
@@ -12,17 +12,18 @@
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../'))

import sphinx_rtd_theme

# -- Project information -----------------------------------------------------

project = 'pytorch-optimizers'
copyright = '2021, kozistr'
copyright = '2023, kozistr'
author = 'kozistr'

# The full version, including alpha/beta/rc tags
release = '0.0.6'
release = '2.1.0'


# -- General configuration ---------------------------------------------------
@@ -31,6 +32,16 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]

# Add any paths that contain templates here, relative to this directory.
@@ -47,9 +58,25 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_theme_options = {
'analytics_anonymize_ip': False,
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'vcs_pageview_mode': '',
'style_nav_header_background': 'white',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False,
}
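
The added ``sphinx.ext.autodoc`` extension is what turns the ``.. autoclass::`` entries in ``docs/api.rst`` into rendered API pages: it imports each class and formats its docstring, including reST ``:param:`` fields like those used throughout this PR. A hypothetical class in that style (not part of this commit), shown only to illustrate what autodoc picks up::

    class ExampleOptimizer:
        """Example optimizer (hypothetical, for illustration only).
        :param lr: float. learning rate.
        :param weight_decay: float. weight decay (L2 penalty).
        """

        def __init__(self, lr: float = 1e-3, weight_decay: float = 0.0):
            self.lr = lr
            self.weight_decay = weight_decay
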
19 changes: 19 additions & 0 deletions docs/index.rst
@@ -7,3 +7,22 @@ Welcome to pytorch-optimizers's documentation!
==============================================

.. include:: ../README.rst

.. toctree::
:maxdepth: 2
:caption: Contents:

Contents
--------

.. toctree::
:maxdepth: 2

api

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
10 changes: 2 additions & 8 deletions hubconf.py
@@ -1,9 +1,3 @@
"""
PyTorch Hub models
Usage:
import torch
optimizer = torch.hub.load('kozistr/pytorch_optimizer', 'adamp')
"""
from functools import partial as _partial
from functools import update_wrapper as _update_wrapper

@@ -17,13 +11,13 @@
for _optimizer in _get_supported_optimizers():
name: str = _optimizer.__name__
_func = _partial(_load_optimizer, optimizer=name)
_update_wrapper(_func, _optimizer.__init__)
_update_wrapper(_func, _optimizer)
for n in (name, name.lower(), name.upper()):
globals()[n] = _func

for _scheduler in _get_supported_lr_schedulers():
name: str = _scheduler.__name__
_func = _partial(_load_lr_scheduler, lr_scheduler=name)
_update_wrapper(_func, _scheduler.__init__)
_update_wrapper(_func, _scheduler)
for n in (name, name.lower(), name.upper()):
globals()[n] = _func
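
The ``_update_wrapper`` change above swaps the metadata source from ``_optimizer.__init__`` to the optimizer class itself, so each hub entry point reports the class name and docstring (which is what ``torch.hub.help`` displays). A self-contained sketch of the difference, using stand-in names rather than the real registry::

    from functools import partial, update_wrapper

    class AdamP:                              # illustrative stand-in class
        """AdamP optimizer (stand-in docstring)."""

    def _load_optimizer(optimizer: str, **kwargs):
        ...                                   # stand-in loader

    old = partial(_load_optimizer, optimizer='AdamP')
    update_wrapper(old, AdamP.__init__)       # before: metadata of __init__
    new = partial(_load_optimizer, optimizer='AdamP')
    update_wrapper(new, AdamP)                # after: metadata of the class

    print(old.__name__)  # '__init__'
    print(new.__name__)  # 'AdamP'
    print(new.__doc__)   # the class docstring
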
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pytorch_optimizer"
version = "2.1.0"
version = "2.1.1"
description = "Bunch of optimizer implementations in PyTorch with clean-code, strict types. Also, including useful optimization ideas."
license = "Apache-2.0"
authors = ["kozistr <kozistr@gmail.com>"]
27 changes: 27 additions & 0 deletions pytorch_optimizer/base/exception.py
@@ -0,0 +1,27 @@
class NoSparseGradientError(Exception):
"""Raised when the gradient is sparse gradient
:param optimizer_name: str. optimizer name.
:param note: str. special conditions to note (default '').
"""

def __init__(self, optimizer_name: str, note: str = ''):
self.note: str = ' ' if note == '' else f' w/ {note} '
self.message: str = f'[-] {optimizer_name}{self.note}does not support sparse gradient.'
super().__init__(self.message)


class ZeroParameterSizeError(Exception):
"""Raised when the parameter size is 0"""

def __init__(self):
self.message: str = '[-] parameter size is 0'
super().__init__(self.message)


class NoClosureError(Exception):
"""Raised when there's no closure function"""

def __init__(self, optimizer_name: str):
self.message: str = f'[-] {optimizer_name} requires closure.'
super().__init__(self.message)
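
A short sketch of how these exceptions are meant to be raised; the ``reject_sparse_gradients`` helper below is hypothetical and only illustrates the ``NoSparseGradientError`` message format defined above::

    import torch
    from pytorch_optimizer.base.exception import NoSparseGradientError

    def reject_sparse_gradients(params, optimizer_name: str) -> None:
        # guard used by dense-only optimizers before touching p.grad
        for p in params:
            if p.grad is not None and p.grad.is_sparse:
                raise NoSparseGradientError(optimizer_name)

    emb = torch.nn.Embedding(10, 4, sparse=True)
    emb(torch.tensor([1, 2])).sum().backward()
    reject_sparse_gradients(emb.parameters(), 'AdamP')
    # NoSparseGradientError: [-] AdamP does not support sparse gradient.
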
5 changes: 4 additions & 1 deletion pytorch_optimizer/lr_scheduler/chebyshev.py
@@ -2,14 +2,17 @@


def chebyshev_steps(small_m: float, big_m: float, num_epochs: int) -> np.ndarray:
"""
"""chebyshev_steps
:param small_m: float. stands for 'm' notation.
:param big_m: float. stands for 'M' notation.
:param num_epochs: int. stands for 'T' notation.
:return: np.array. chebyshev_steps
"""

c, r = (big_m + small_m) / 2.0, (big_m - small_m) / 2.0
thetas = (np.arange(num_epochs) + 0.5) / num_epochs * np.pi

return 1.0 / (c - r * np.cos(thetas))
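
A small usage sketch for ``chebyshev_steps``, assuming it is importable from the module path shown in this diff; the printed numbers are approximate::

    from pytorch_optimizer.lr_scheduler.chebyshev import chebyshev_steps

    # m = 0.1, M = 1.0, T = 8  ->  c = 0.55, r = 0.45
    steps = chebyshev_steps(small_m=0.1, big_m=1.0, num_epochs=8)
    print(steps.shape)               # (8,)
    print(steps.min(), steps.max())  # ~1.01 and ~9.20
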


