Commit bf4aa847 authored by Michael Carilli

Moving sgd to optimizers

parent 6af5980e
apex/optimizers/__init__.py
 from .fused_adam import FusedAdam
+from .fused_sgd import FusedSGD
 from .fp16_optimizer import FP16_Optimizer
apex/optimizers/fused_sgd.py
@@ -3,7 +3,7 @@ from torch.optim.optimizer import Optimizer, required
 from apex.multi_tensor_apply import multi_tensor_applier
-class SGD(Optimizer):
+class FusedSGD(Optimizer):
     r"""Implements stochastic gradient descent (optionally with momentum).
     Nesterov momentum is based on the formula from
...
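For context, a minimal usage sketch of the renamed optimizer imported from its new location. This is an illustration, not part of the commit: the constructor arguments shown (lr, momentum) mirror torch.optim.SGD and the values are arbitrary, and the example assumes apex was built with its CUDA extensions, since the fused multi-tensor kernels run on GPU tensors.

```python
import torch
from apex.optimizers import FusedSGD  # import path added by this commit

# Toy model; parameters and inputs are assumed to live on a CUDA device
# because the fused kernels (via multi_tensor_applier) require GPU tensors.
model = torch.nn.Linear(10, 1).cuda()

# Constructor mirrors torch.optim.SGD; lr/momentum here are example
# choices, not values taken from the commit.
optimizer = FusedSGD(model.parameters(), lr=0.01, momentum=0.9)

loss = model(torch.randn(4, 10, device="cuda")).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
```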