Commit d74fda26 authored by brett koonce, committed by mcarilli

docs: minor spelling tweaks (#25)

parent a21a945c
@@ -341,7 +341,7 @@ class RNNCell(nn.Module):
         """
         for i, _ in enumerate(self.hidden):
             if self.hidden[i] is None:
-                raise RuntimeError("Must inialize hidden state before you can detach it")
+                raise RuntimeError("Must initialize hidden state before you can detach it")
         for i, _ in enumerate(self.hidden):
             self.hidden[i] = self.hidden[i].detach()
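For context, the loop above guards the usual truncated-backpropagation-through-time pattern: hidden state is carried across batches but detached between them, so each backward pass stops at the current chunk. A minimal sketch of that pattern, using a plain `torch.nn.RNNCell` as a stand-in rather than apex's RNNCell:

```python
import torch

# Illustrative truncated-BPTT loop. Detaching the hidden state between
# steps keeps the autograd graph from growing across the whole history.
rnn_cell = torch.nn.RNNCell(input_size=8, hidden_size=16)
hidden = torch.zeros(4, 16)  # must be initialized before it can be detached

for step in range(100):
    x = torch.randn(4, 8)
    hidden = rnn_cell(x, hidden)
    loss = hidden.pow(2).mean()
    loss.backward()
    # Stop gradients here: the next backward() will not unroll
    # through every previous step.
    hidden = hidden.detach()
```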
@@ -5,7 +5,7 @@ from torch.nn.parameter import Parameter
 
 class LARC(object):
     """
-    :class:`LARC` is a pytorch implementation of both the scaling and clipping varients of LARC,
+    :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,
     in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
     local learning rate for each individual parameter. The algorithm is designed to improve
     convergence of large batch training.
@@ -34,7 +34,7 @@ class LARC(object):
         optimizer: Pytorch optimizer to wrap and modify learning rate for.
         trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888
         clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.
-        eps: epsilon kludge to help with numerical stability while calculating adaotive_lr
+        eps: epsilon kludge to help with numerical stability while calculating adaptive_lr
     """
     def __init__(self, optimizer, trust_coefficient=0.02, clip=True, eps=1e-8):
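The `clip` behavior described in the docstring is easy to make concrete. The sketch below is not apex's implementation; `larc_local_lr` is a hypothetical helper that just applies the ratio the docstring describes, trust_coefficient * ||w|| / (||grad w|| + eps):

```python
import torch

def larc_local_lr(param, trust_coefficient=0.02, eps=1e-8):
    # Hypothetical helper: adaptive local lr from the ratio of parameter
    # norm to gradient norm, with eps guarding against division by zero.
    param_norm = param.detach().norm()
    grad_norm = param.grad.detach().norm()
    return trust_coefficient * param_norm / (grad_norm + eps)

# Toy parameter with a gradient attached.
w = torch.randn(10, requires_grad=True)
w.sum().backward()

optimizer_lr = 0.1
local_lr = larc_local_lr(w).item()

effective_lr_clip = min(optimizer_lr, local_lr)   # clip=True
effective_lr_scale = local_lr * optimizer_lr      # clip=False
```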
@@ -69,7 +69,7 @@ def find_cuda_home():
                           " or include directories.")
         raise RuntimeError("Error: Could not find cuda on this system. " +
-                           "Please set your CUDA_HOME enviornment variable "
+                           "Please set your CUDA_HOME environment variable "
                            "to the CUDA base directory.")
     return CUDA_HOME
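The message above asks the user to export CUDA_HOME before building. A common lookup pattern looks like the sketch below; this illustrates the usual approach (environment variable first, then the conventional install path), not necessarily apex's exact logic:

```python
import os

def find_cuda_home():
    # Honor the environment variable first, then fall back to the
    # conventional CUDA install location.
    cuda_home = os.environ.get("CUDA_HOME") or os.environ.get("CUDA_PATH")
    if cuda_home is None and os.path.exists("/usr/local/cuda"):
        cuda_home = "/usr/local/cuda"
    if cuda_home is None:
        raise RuntimeError("Error: Could not find cuda on this system. "
                           "Please set your CUDA_HOME environment variable "
                           "to the CUDA base directory.")
    return cuda_home
```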