[chore] Documentation fixes, no more ref issues and more API fields (#103)

* various fixes, no more issues with `make html` and more API fields should be populated
blefaudeux committed Sep 22, 2020
1 parent b488dcf commit 7c5203e
Showing 5 changed files with 20 additions and 14 deletions.
2 changes: 2 additions & 0 deletions docs/source/api/nn/pipe.rst
@@ -2,3 +2,5 @@ Pipe
 ====
 
 .. autoclass:: fairscale.nn.Pipe
+    :members:
+    :undoc-members:
2 changes: 2 additions & 0 deletions docs/source/api/optim/oss.rst
@@ -2,3 +2,5 @@ OSS
 ====
 
 .. autoclass:: fairscale.optim.OSS
+    :members:
+    :undoc-members:
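
Context, not part of the diff: ``:members:`` tells Sphinx autodoc to render a class's members that carry docstrings, and ``:undoc-members:`` additionally includes members without docstrings, which is how this commit gets "more API fields" populated. A minimal illustration with a hypothetical class:

    # Illustrative sketch of how the two autodoc options differ.
    class Example:
        def documented(self) -> None:
            """Rendered with ``:members:`` alone."""

        def undocumented(self) -> None:
            pass  # rendered only when ``:undoc-members:`` is also set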
4 changes: 1 addition & 3 deletions docs/source/conf.py
@@ -37,9 +37,7 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = [
-    "sphinx.ext.autodoc",
-]
+extensions = ["sphinx.ext.autodoc", "sphinx.ext.autosectionlabel"]
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates"]
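
Context, not part of the diff: sphinx.ext.autosectionlabel lets ``:ref:`` targets resolve directly from section titles, which addresses the ``make html`` reference warnings mentioned in the commit message. A hedged conf.py sketch; ``autosectionlabel_prefix_document`` is an optional extra that this commit does not set:

    # conf.py (illustrative sketch)
    extensions = ["sphinx.ext.autodoc", "sphinx.ext.autosectionlabel"]

    # Optional: prefix generated labels with the document path so two pages
    # sharing a section title do not produce duplicate labels.
    autosectionlabel_prefix_document = True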
20 changes: 10 additions & 10 deletions fairscale/nn/pipe/pipe.py
@@ -290,7 +290,7 @@ class Pipe(Module):
             ``'except_last'``, or ``'never'`` (default: ``'except_last'``)
         deferred_batch_norm (bool):
             whether to use deferred BatchNorm moving statistics (default:
-            :data:`False`, see :ref:`Deferred Batch Normalization` for more
+            :data:`False`, see :class:`DeferredBatchNorm` for more
             details)
         pipelined_backward (bool, optional):
             if True, call torch.autograd.backward once per microbatch on the
@@ -527,15 +527,15 @@ def cpu(self) -> "Pipe":
         return super().cpu()
 
     def to(self, *args: Any, **kwargs: Any) -> "Pipe":
-        # Deny these usages:
-        #
-        # - to(device[, dtype, non_blocking])
-        # - to(tensor[, non_blocking])
-        #
-        # But allow this:
-        #
-        # - to(dtype[, non_blocking])
-        #
+        """ Restrict .to() options.
+
+        Deny these usages:
+        - to(device[, dtype, non_blocking])
+        - to(tensor[, non_blocking])
+
+        But allow this:
+        - to(dtype[, non_blocking])
+        """
         if self.devices:
             if "device" in kwargs or "tensor" in kwargs:
                 raise MOVING_DENIED
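
A usage sketch of the ``.to()`` restriction above (illustrative, not from the commit; assumes the 2020-era ``Pipe(module, balance=...)`` constructor and explicit CPU placement):

    import torch
    import torch.nn as nn
    from fairscale.nn import Pipe

    # Two-stage pipeline; `balance` assigns one layer per partition.
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    pipe = Pipe(model, balance=[1, 1], devices=["cpu", "cpu"], chunks=1)

    pipe.to(torch.float64)  # allowed: dtype-only move

    try:
        pipe.to(torch.device("cpu"))  # denied: Pipe manages device placement
    except TypeError:  # MOVING_DENIED is raised as a TypeError
        pass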
6 changes: 5 additions & 1 deletion fairscale/optim/oss.py
@@ -16,6 +16,8 @@
 
 from .utils import broadcast_object, recursive_copy_to_device
 
+__all__ = ["OSS"]
+
 if TYPE_CHECKING:  # pragma: no cover
     from torch.optim.optimizer import _params_t
 else:
@@ -25,7 +27,9 @@
 class OSS(Optimizer):
     """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
     optimizer and shards its state as described by ZeRO_.
-    :: opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
+    ::
+
+        opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
 
 .. _ZeRO: https://arxiv.org/abs/1910.02054
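
A usage sketch expanding the one-liner in the docstring above (illustrative, not from the commit; OSS shards optimizer state across ranks, so a torch.distributed process group must be initialized first, e.g. via init_process_group under torch.distributed.launch):

    import torch
    from fairscale.optim import OSS

    model = torch.nn.Linear(8, 2)
    optimizer = OSS(model.parameters(), optim=torch.optim.Adam, lr=0.01)

    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    optimizer.step()  # each rank updates its shard, then parameters are synced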
