Commit b2f63c48 authored by Michael Carilli

Some documentation cleanup

parent 2cbca1a4
# from . import RNN
# from . import reparameterization
from . import fp16_utils
from . import parallel
from . import amp
...
@@ -9,11 +9,5 @@ from .fp16util import (
    clip_grad_norm,
)
from .fused_weight_norm import Fused_Weight_Norm
from .fp16_optimizer import FP16_Optimizer
from .loss_scaler import LossScaler, DynamicLossScaler
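For context, a minimal sketch of how these re-exported utilities are typically used, following the documented FP16_Optimizer API; the model, data, and hyperparameters below are illustrative and not part of this commit:

import torch
from apex.fp16_utils import FP16_Optimizer

model = torch.nn.Linear(1024, 1024).cuda().half()        # fp16 model parameters
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# Wrap the optimizer; dynamic loss scaling backs the scale off when overflows appear.
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)

data = torch.randn(64, 1024, device='cuda', dtype=torch.float16)
target = torch.randn(64, 1024, device='cuda', dtype=torch.float16)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(data), target)
optimizer.backward(loss)    # replaces loss.backward(); applies the loss scale internally
optimizer.step()

The wrapper keeps fp32 master copies of the parameters, so step() updates in full precision and copies the results back to the fp16 model.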
import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
class Fused_Weight_Norm(Function):
    """
    We are refactoring our fused kernels to add to Pytorch core, so that Pytorch's built-in weightnorm
    will use them transparently. Please use Pytorch's built-in weightnorm implementation for now, to
    future-proof your code.
    """

    @staticmethod
    def forward(ctx):
        raise NotImplementedError("Use Pytorch's built-in weightnorm implementation. "+
            "We are in the process of adding our fused kernels to Pytorch core, "+
            "so Pytorch's built-in weightnorm will use them transparently.")

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        raise NotImplementedError("Use Pytorch's built-in weightnorm implementation. "+
            "We are in the process of adding our fused kernels to Pytorch core, "+
            "so Pytorch's built-in weightnorm will use them transparently.")
@@ -20,17 +20,18 @@ except ImportError as err:
def convert_syncbn_model(module, process_group=None, channel_last=False):
    '''
    Recursively traverse module and its children to replace all instances of
    ``torch.nn.modules.batchnorm._BatchNorm`` with :class:`apex.parallel.SyncBatchNorm`.

    All ``torch.nn.BatchNorm*N*d`` wrap around
    ``torch.nn.modules.batchnorm._BatchNorm``, so this function lets you easily switch
    to use sync BN.

    Args:
        module (torch.nn.Module): input module

    Example::

        >>> # model is an instance of torch.nn.Module
        >>> import apex
        >>> sync_bn_model = apex.parallel.convert_syncbn_model(model)
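A simplified sketch of the recursive replacement the docstring describes; the real convert_syncbn_model also copies affine parameters and running stats and honors process_group/channel_last, which are omitted here:

import torch
import apex

def convert_syncbn_sketch(module):
    # Swap this module if it is any BatchNorm variant.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        return apex.parallel.SyncBatchNorm(module.num_features)   # state copy omitted
    # Otherwise recurse into the children and replace them in place.
    for name, child in module.named_children():
        setattr(module, name, convert_syncbn_sketch(child))
    return module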
...
@@ -91,7 +91,7 @@ class Reducer(object):
    across processes. :class:`Reducer` is intended to give the user additional control:
    Unlike :class:`DistributedDataParallel`, :class:`Reducer` will not automatically allreduce
    parameters during ``backward()``.
    Instead, :class:`Reducer` waits for the user to call ``<reducer_instance>.reduce()`` manually.
    This enables, for example, delaying the allreduce to be carried out every
    several iterations instead of every single iteration.
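A hedged sketch of the delayed allreduce the docstring mentions; the Reducer constructor argument is assumed to be the wrapped model, and the model/loader/criterion/optimizer names are illustrative:

import apex

reducer = apex.parallel.Reducer(model)      # assumed: Reducer wraps the model's parameters
accumulation_steps = 4

for step, (inputs, targets) in enumerate(loader):
    loss = criterion(model(inputs), targets)
    loss.backward()                         # gradients accumulate locally; no allreduce yet
    if (step + 1) % accumulation_steps == 0:
        reducer.reduce()                    # allreduce the accumulated gradients now
        optimizer.step()
        optimizer.zero_grad()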
...
@@ -8,19 +8,19 @@ from apex.parallel import ReduceOp
class SyncBatchNorm(_BatchNorm):
    """
    Synchronized batch normalization module extended from ``torch.nn.BatchNormNd``
    with the added stats reduction across multiple processes.

    :class:`apex.parallel.SyncBatchNorm` is designed to work with
    ``DistributedDataParallel``.

    When running in training mode, the layer reduces stats across all processes
    to increase the effective batch size for the normalization layer. This is useful
    in applications where the batch size is small on a given process, which would
    diminish the converged accuracy of the model. The module uses the collective
    communication package from ``torch.distributed``.

    When running in evaluation mode, the layer falls back to
    ``torch.nn.functional.batch_norm``.

    Args:
        num_features: :math:`C` from an expected input of size
@@ -37,7 +37,8 @@ class SyncBatchNorm(_BatchNorm):
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``True``

    Example::

        >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()
        >>> inp = torch.randn(10, 100, 14, 14).cuda()
        >>> out = sbn(inp)
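Conceptually, the cross-process stats reduction amounts to allreducing per-channel sums so every rank normalizes with global batch statistics. A simplified sketch (not the actual apex kernel), assuming torch.distributed has been initialized:

import torch
import torch.distributed as dist

def global_batch_stats(x):
    # x: local activations of shape (N, C, H, W); reduce over every dim except C.
    dims = [0, 2, 3]
    count = torch.tensor([x.numel() / x.size(1)], device=x.device)
    total = x.sum(dim=dims)
    total_sq = (x * x).sum(dim=dims)
    # Sum counts and moments over all processes in the default group.
    for t in (count, total, total_sq):
        dist.all_reduce(t, op=dist.ReduceOp.SUM)
    mean = total / count
    var = total_sq / count - mean * mean    # E[x^2] - E[x]^2
    return mean, var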
...
.. role:: hidden
:class: hidden-section
apex.RNN
===================================
Under construction...
.. This submodule is a development API aimed at supplying parity with torch.nn.RNN,
.. while being easier to extend. This module is not ready for use and still lacks important
.. features and validation.
..
.. .. automodule:: apex.RNN
.. .. currentmodule:: apex.RNN
..
.. .. RNN
.. ----------
..
.. .. autofunction:: LSTM
..
.. .. autofunction:: mLSTM
..
.. .. autofunction:: GRU
..
.. .. autofunction:: ReLU
..
.. .. autofunction:: Tanh
{% extends "!layout.html" %}
{% block sidebartitle %} {{ super() }}
<style>
/* Sidebar header (and topbar for mobile) */
.wy-side-nav-search, .wy-nav-top {
background: #76b900;
}
.wy-side-nav-search a:link, .wy-nav-top a:link {
color: #fff;
}
.wy-side-nav-search a:visited, .wy-nav-top a:visited {
color: #fff;
}
.wy-side-nav-search a:hover, .wy-nav-top a:hover {
color: #fff;
}
.wy-menu-vertical a:link, .wy-menu-vertical a:visited {
color: #d9d9d9
}
.wy-menu-vertical a:active {
background-color: #76b900
}
.wy-side-nav-search>div.version {
color: rgba(0, 0, 0, 0.3)
}
</style>
{% endblock %}
{% block footer %} {{ super() }}
<style>
a:link, a:visited {
color: #76b900;
}
a:hover {
color: #8c0;
}
.rst-content dl:not(.docutils) dt {
background: rgba(118, 185, 0, 0.1);
color: rgba(59,93,0,1);
border-top: solid 3px rgba(59,93,0,1);
}
</style>
{% endblock %}
@@ -45,6 +45,7 @@ extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
]
napoleon_use_ivar = True
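sphinx.ext.extlinks is usually paired with an extlinks mapping in conf.py; the diff does not show one, so the entry below is only a hypothetical illustration:

# Hypothetical extlinks mapping (not part of this commit) defining a :github:`...` role.
extlinks = {
    'github': ('https://github.com/%s', None),
}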
@@ -87,7 +88,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
@@ -97,7 +98,7 @@ pygments_style = 'sphinx'
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
@@ -131,13 +132,13 @@ html_context = {
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
@@ -166,7 +167,7 @@ latex_documents = [
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
@@ -176,7 +177,7 @@ man_pages = [
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
...
@@ -57,10 +57,3 @@ Manual master parameter management
.. autofunction:: master_params_to_model_params
.. autofunction:: model_grads_to_master_grads
Custom Operations
-----------------
.. autoclass:: Fused_Weight_Norm
:members:
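As a hedged sketch of how the manual master-parameter helpers documented above fit together; prep_param_lists is the apex.fp16_utils companion that builds the fp16/fp32 pairs, and the model, loader, criterion, and loss scale below are illustrative:

import torch
from apex.fp16_utils import (
    prep_param_lists,
    model_grads_to_master_grads,
    master_params_to_model_params,
)

model_params, master_params = prep_param_lists(model)   # fp16 params and their fp32 master copies
optimizer = torch.optim.SGD(master_params, lr=1e-3)
loss_scale = 128.0                                       # illustrative static loss scale

for inputs, targets in loader:                           # hypothetical data loader
    model.zero_grad()
    loss = criterion(model(inputs), targets)             # hypothetical loss criterion
    (loss.float() * loss_scale).backward()               # scale so fp16 grads stay representable
    model_grads_to_master_grads(model_params, master_params)
    for param in master_params:
        param.grad.data.mul_(1.0 / loss_scale)           # unscale in fp32
    optimizer.step()
    master_params_to_model_params(model_params, master_params)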
.. role:: hidden
:class: hidden-section
apex.reparameterization
===================================
Under construction...
.. .. automodule:: apex.reparameterization
.. .. currentmodule:: apex.reparameterization
..
.. .. autoclass:: Reparameterization
.. :members:
..
.. .. autoclass:: WeightNorm
.. :members: