Commit d17fa851 authored by Dmytro Okhonko, committed by Facebook Github Bot

Adadelta optimizer

Summary: Adding an Adadelta optimizer to fairseq as a wrapper around torch.optim.Adadelta

Reviewed By: myleott

Differential Revision: D14418635

fbshipit-source-id: 6bf5ec008e905a4a2cbf7415e9492f5eea3ff07f
parent 9e1c880f
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch.optim

from . import FairseqOptimizer, register_optimizer


@register_optimizer('adadelta')
class Adadelta(FairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO',
                            help='coefficient used for computing a running average of squared gradients')
        parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS',
                            help='term added to the denominator to improve numerical stability')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'rho': self.args.adadelta_rho,
            'eps': self.args.adadelta_eps,
            'weight_decay': self.args.weight_decay,
        }
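
For reference, a minimal sketch of how this wrapper can be exercised on its own. This is not part of the commit: the import path assumes the module lands at the conventional fairseq/optim/adadelta.py location, and the Namespace fields simply mirror the flags above plus fairseq's standard --lr and --weight-decay options.

# Sketch only: the import path and the standalone-usage pattern are assumptions,
# not part of this commit.
import argparse

import torch

from fairseq.optim.adadelta import Adadelta

args = argparse.Namespace(
    lr=[1.0],             # fairseq parses --lr into a list; optimizer_config reads lr[0]
    adadelta_rho=0.9,     # --adadelta-rho
    adadelta_eps=1e-6,    # --adadelta-eps
    weight_decay=0.0,     # fairseq's standard --weight-decay option
)

model = torch.nn.Linear(16, 4)
# Pass a concrete list (as fairseq's trainer does) so `params` can safely be
# consumed more than once.
optimizer = Adadelta(args, list(model.parameters()))

loss = model(torch.randn(8, 16)).sum()
loss.backward()
optimizer.step()       # delegates to the wrapped torch.optim.Adadelta
optimizer.zero_grad()

In a training run the new entry is selected with --optimizer adadelta, which is exactly what the test below does for each optimizer in its list.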
...@@ -220,6 +220,28 @@ class TestLanguageModeling(unittest.TestCase):
                eval_lm_main(data_dir)


class TestCommonOptions(unittest.TestCase):
    def test_optimizers(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
                # Use just a bit of data and a tiny model to keep this test's runtime reasonable
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
                last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
                for optimizer in optimizers:
                    if os.path.exists(last_checkpoint):
                        os.remove(last_checkpoint)
                    train_translation_model(data_dir, 'lstm', [
                        '--encoder-layers', '1',
                        '--encoder-hidden-size', '32',
                        '--decoder-layers', '1',
                        '--optimizer', optimizer,
                    ])
                    generate_main(data_dir)


def create_dummy_data(data_dir, num_examples=1000, maxlen=20):
    def _create_dummy_data(filename):
...@@ -267,7 +289,6 @@ def train_translation_model(data_dir, arch, extra_flags=None):
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
-           '--optimizer', 'nag',
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
...
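
The hunk headers above reference TestLanguageModeling, so the new test presumably lands in fairseq's tests/test_binaries.py; if so, it can be run in isolation with something like the following sketch (the module path tests.test_binaries is an assumption).

# Assumed module path; adjust if the test file lives elsewhere.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    'tests.test_binaries.TestCommonOptions.test_optimizers'
)
unittest.TextTestRunner(verbosity=2).run(suite)

The test trains a tiny LSTM model once per optimizer in the list, including the new adadelta entry, and deletes checkpoint_last.pt between runs so each optimizer starts from scratch.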