#!/usr/bin/env python

# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os

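# Intel's OpenMP runtime reads KMP_AFFINITY when it initializes, so it must
# be set before torch (and MKL) are imported below.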
os.environ['KMP_AFFINITY'] = 'disabled'

import argparse
import logging
import sys
import time
from ast import literal_eval

import dllogger
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed

import seq2seq.data.config as config
# import seq2seq.gpu_affinity as gpu_affinity
import seq2seq.train.trainer as trainers
import seq2seq.utils as utils
from seq2seq.data.dataset import LazyParallelDataset
from seq2seq.data.dataset import ParallelDataset
from seq2seq.data.dataset import TextDataset
from seq2seq.data.tokenizer import Tokenizer
from seq2seq.inference.translator import Translator
from seq2seq.models.gnmt import GNMT
from seq2seq.train.smoothing import LabelSmoothing
from seq2seq.train.table import TrainingTable


def parse_args():
    """
    Parse commandline arguments.
    """
    def exclusive_group(group, name, default, help):
        destname = name.replace('-', '_')
        subgroup = group.add_mutually_exclusive_group(required=False)
        subgroup.add_argument(f'--{name}', dest=destname,
                              action='store_true',
                              help=f'{help} (use \'--no-{name}\' to disable)')
        subgroup.add_argument(f'--no-{name}', dest=destname,
                              action='store_false', help=argparse.SUPPRESS)
        subgroup.set_defaults(**{destname: default})

    parser = argparse.ArgumentParser(
        description='GNMT training',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # dataset
    dataset = parser.add_argument_group('dataset setup')
    dataset.add_argument('--dataset-dir', default='data/wmt16_de_en',
                         help='path to the directory with training/test data')

    dataset.add_argument('--src-lang',
                         default='en',
                         help='source language')
    dataset.add_argument('--tgt-lang',
                         default='de',
                         help='target language')

    dataset.add_argument('--vocab',
                         default='vocab.bpe.32000',
                         help='path to the vocabulary file \
                         (relative to DATASET_DIR directory)')
    dataset.add_argument('-bpe', '--bpe-codes', default='bpe.32000',
                         help='path to the file with bpe codes \
                         (relative to DATASET_DIR directory)')

    dataset.add_argument('--train-src',
                         default='train.tok.clean.bpe.32000.en',
                         help='path to the training source data file \
                         (relative to DATASET_DIR directory)')
    dataset.add_argument('--train-tgt',
                         default='train.tok.clean.bpe.32000.de',
                         help='path to the training target data file \
                         (relative to DATASET_DIR directory)')

    dataset.add_argument('--val-src',
                         default='newstest_dev.tok.clean.bpe.32000.en',
                         help='path to the validation source data file \
                         (relative to DATASET_DIR directory)')
    dataset.add_argument('--val-tgt',
                         default='newstest_dev.tok.clean.bpe.32000.de',
                         help='path to the validation target data file \
                         (relative to DATASET_DIR directory)')

    dataset.add_argument('--test-src',
                         default='newstest2014.tok.bpe.32000.en',
                         help='path to the test source data file \
                         (relative to DATASET_DIR directory)')
    dataset.add_argument('--test-tgt',
                         default='newstest2014.de',
                         help='path to the test target data file \
                         (relative to DATASET_DIR directory)')

    # results
    results = parser.add_argument_group('results setup')
    results.add_argument('--save-dir', default='gnmt',
                         help='path to directory with results, it will be \
                         automatically created if it does not exist')
    results.add_argument('--print-freq', default=10, type=int,
                         help='print log every PRINT_FREQ batches')
    results.add_argument('--warmup', default=1, type=int,
                         help='number of warmup iterations for performance \
                         counters')

    # model
    model = parser.add_argument_group('model setup')
    model.add_argument('--hidden-size', default=1024, type=int,
                       help='hidden size of the model')
    model.add_argument('--num-layers', default=4, type=int,
                       help='number of RNN layers in encoder and in decoder')
    model.add_argument('--dropout', default=0.2, type=float,
                       help='dropout applied to input of RNN cells')

    exclusive_group(group=model, name='share-embedding', default=True,
                    help='use shared embeddings for encoder and decoder')

    model.add_argument('--smoothing', default=0.1, type=float,
                       help='label smoothing, if equal to zero model will use \
                       CrossEntropyLoss, if not zero model will be trained \
                       with label smoothing loss')

    # setup
    general = parser.add_argument_group('general setup')
    general.add_argument('--math', default='fp32',
                         choices=['fp16', 'fp32', 'tf32', 'manual_fp16'],
                         help='precision')
    general.add_argument('--seed', default=None, type=int,
                         help='master seed for random number generators, if \
                         "seed" is undefined then the master seed will be \
                         sampled from random.SystemRandom()')
    general.add_argument('--prealloc-mode', default='always', type=str,
                         choices=['off', 'once', 'always'],
                         help='controls preallocation')
    general.add_argument('--dllog-file', type=str, default='train_log.json',
                         help='Name of the DLLogger output file')
    general.add_argument('--affinity', type=str,
                         default='socket_unique_interleaved',
                         choices=['socket', 'single', 'single_unique',
                                  'socket_unique_interleaved',
                                  'socket_unique_continuous',
                                  'disabled'],
                         help='type of CPU affinity')

    exclusive_group(group=general, name='eval', default=True,
                    help='run validation and test after every epoch')
    exclusive_group(group=general, name='env', default=True,
                    help='print info about execution env')
    exclusive_group(group=general, name='cuda', default=True,
                    help='enables cuda')
    exclusive_group(group=general, name='cudnn', default=True,
                    help='enables cudnn')
    exclusive_group(group=general, name='log-all-ranks', default=True,
                    help='enables logging from all distributed ranks, if \
                    disabled then only logs from rank 0 are reported')

    # training
    training = parser.add_argument_group('training setup')
    dataset.add_argument('--train-max-size', default=None, type=int,
                         help='use at most TRAIN_MAX_SIZE elements from \
                         training dataset (useful for benchmarking), by \
                         default uses entire dataset')
    training.add_argument('--train-batch-size', default=128, type=int,
                          help='training batch size per worker')
    training.add_argument('--train-global-batch-size', default=None, type=int,
                          help='global training batch size, this argument \
                          does not have to be defined, if it is defined it \
                          will be used to automatically \
                          compute train_iter_size \
                          using the equation: train_iter_size = \
                          train_global_batch_size // (train_batch_size * \
                          world_size)')
    training.add_argument('--train-iter-size', metavar='N', default=1,
                          type=int,
                          help='training iter size, training loop will \
                          accumulate gradients over N iterations and execute \
                          optimizer every N steps')
    training.add_argument('--epochs', default=1, type=int,
                          help='max number of training epochs')

    training.add_argument('--grad-clip', default=5.0, type=float,
                          help='enables gradient clipping and sets maximum \
                          norm of gradients')
    training.add_argument('--train-max-length', default=50, type=int,
                          help='maximum sequence length for training \
                          (including special BOS and EOS tokens)')
    training.add_argument('--train-min-length', default=0, type=int,
                          help='minimum sequence length for training \
                          (including special BOS and EOS tokens)')
    training.add_argument('--train-loader-workers', default=2, type=int,
                          help='number of workers for training data loading')
    training.add_argument('--batching', default='bucketing', type=str,
                          choices=['random', 'sharding', 'bucketing'],
                          help='select batching algorithm')
    training.add_argument('--shard-size', default=80, type=int,
                          help='shard size for "sharding" batching algorithm, \
                          in multiples of global batch size')
    training.add_argument('--num-buckets', default=5, type=int,
                          help='number of buckets for "bucketing" batching \
                          algorithm')

    # optimizer
    optimizer = parser.add_argument_group('optimizer setup')
    optimizer.add_argument('--optimizer', type=str, default='Adam',
                           help='training optimizer')
    optimizer.add_argument('--lr', type=float, default=2.00e-3,
                           help='learning rate')
    optimizer.add_argument('--optimizer-extra', type=str,
                           default="{}",
                           help='extra options for the optimizer')

    # mixed precision loss scaling
    loss_scaling = parser.add_argument_group(
        'mixed precision loss scaling setup'
        )
    loss_scaling.add_argument('--init-scale', type=float, default=8192,
                              help='initial loss scale')
    loss_scaling.add_argument('--upscale-interval', type=float, default=128,
                              help='loss upscaling interval')

    # scheduler
    scheduler = parser.add_argument_group('learning rate scheduler setup')
    scheduler.add_argument('--warmup-steps', type=str, default='200',
                           help='number of learning rate warmup iterations')
    scheduler.add_argument('--remain-steps', type=str, default='0.666',
                           help='starting iteration for learning rate decay')
    scheduler.add_argument('--decay-interval', type=str, default='None',
                           help='interval between learning rate decay steps')
    scheduler.add_argument('--decay-steps', type=int, default=4,
                           help='max number of learning rate decay steps')
    scheduler.add_argument('--decay-factor', type=float, default=0.5,
                           help='learning rate decay factor')

    # validation
    val = parser.add_argument_group('validation setup')
    val.add_argument('--val-batch-size', default=64, type=int,
                     help='batch size for validation')
    val.add_argument('--val-max-length', default=125, type=int,
                     help='maximum sequence length for validation \
                     (including special BOS and EOS tokens)')
    val.add_argument('--val-min-length', default=0, type=int,
                     help='minimum sequence length for validation \
                     (including special BOS and EOS tokens)')
    val.add_argument('--val-loader-workers', default=0, type=int,
                     help='number of workers for validation data loading')

    # test
    test = parser.add_argument_group('test setup')
    test.add_argument('--test-batch-size', default=128, type=int,
                      help='batch size for test')
    test.add_argument('--test-max-length', default=150, type=int,
                      help='maximum sequence length for test \
                      (including special BOS and EOS tokens)')
    test.add_argument('--test-min-length', default=0, type=int,
                      help='minimum sequence length for test \
                      (including special BOS and EOS tokens)')
    test.add_argument('--beam-size', default=5, type=int,
                      help='beam size')
    test.add_argument('--len-norm-factor', default=0.6, type=float,
                      help='length normalization factor')
    test.add_argument('--cov-penalty-factor', default=0.1, type=float,
                      help='coverage penalty factor')
    test.add_argument('--len-norm-const', default=5.0, type=float,
                      help='length normalization constant')
    test.add_argument('--intra-epoch-eval', metavar='N', default=0, type=int,
                      help='evaluate within training epoch, this option will \
                      enable extra N equally spaced evaluations executed \
                      during each training epoch')
    test.add_argument('--test-loader-workers', default=0, type=int,
                      help='number of workers for test data loading')

    # checkpointing
    chkpt = parser.add_argument_group('checkpointing setup')
    chkpt.add_argument('--start-epoch', default=0, type=int,
                       help='manually set initial epoch counter')
    chkpt.add_argument('--resume', default=None, type=str, metavar='PATH',
                       help='resumes training from checkpoint from PATH')
    chkpt.add_argument('--save-all', action='store_true', default=False,
                       help='saves checkpoint after every epoch')
    chkpt.add_argument('--save-freq', default=5000, type=int,
                       help='save checkpoint every SAVE_FREQ batches')
    chkpt.add_argument('--keep-checkpoints', default=0, type=int,
                       help='keep only last KEEP_CHECKPOINTS checkpoints, \
                       affects only checkpoints controlled by --save-freq \
                       option')

    # benchmarking
    benchmark = parser.add_argument_group('benchmark setup')
    benchmark.add_argument('--target-perf', default=None, type=float,
                           help='target training performance (in tokens \
                           per second)')
    benchmark.add_argument('--target-bleu', default=None, type=float,
                           help='target accuracy')

    # distributed
    distributed = parser.add_argument_group('distributed setup')
    distributed.add_argument('--local_rank', type=int,
                             default=os.getenv('LOCAL_RANK', 0),
                             help='used for multi-process training')

    args = parser.parse_args()

    args.lang = {'src': args.src_lang, 'tgt': args.tgt_lang}

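    # All data files are specified relative to --dataset-dir; resolve them to
    # full paths here.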
    args.vocab = os.path.join(args.dataset_dir, args.vocab)
    args.bpe_codes = os.path.join(args.dataset_dir, args.bpe_codes)
    args.train_src = os.path.join(args.dataset_dir, args.train_src)
    args.train_tgt = os.path.join(args.dataset_dir, args.train_tgt)
    args.val_src = os.path.join(args.dataset_dir, args.val_src)
    args.val_tgt = os.path.join(args.dataset_dir, args.val_tgt)
    args.test_src = os.path.join(args.dataset_dir, args.test_src)
    args.test_tgt = os.path.join(args.dataset_dir, args.test_tgt)

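    # Scheduler milestones are parsed with literal_eval so they may be given
    # either as absolute step counts (ints) or as fractions of the total
    # number of training steps (floats, e.g. the default remain_steps='0.666').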
    args.warmup_steps = literal_eval(args.warmup_steps)
    args.remain_steps = literal_eval(args.remain_steps)
    args.decay_interval = literal_eval(args.decay_interval)

    return args


def set_iter_size(train_iter_size, train_global_batch_size, train_batch_size):
    """
    Automatically set train_iter_size based on train_global_batch_size,
    world_size and per-worker train_batch_size

    :param train_global_batch_size: global training batch size
    :param train_batch_size: local training batch size
    """
    if train_global_batch_size is not None:
        global_bs = train_global_batch_size
        bs = train_batch_size
        world_size = utils.get_world_size()
        assert global_bs % (bs * world_size) == 0
        train_iter_size = global_bs // (bs * world_size)
        logging.info(f'Global batch size was set, '
                     f'setting train_iter_size to {train_iter_size}')
    return train_iter_size


def build_criterion(vocab_size, padding_idx, smoothing):
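    """
    Build the training loss: a label-smoothed loss when smoothing > 0,
    otherwise a summed CrossEntropyLoss that ignores padding tokens.
    """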
    if smoothing == 0.:
        logging.info('Building CrossEntropyLoss')
        criterion = nn.CrossEntropyLoss(ignore_index=padding_idx,
                                        reduction='sum')
    else:
        logging.info(f'Building LabelSmoothingLoss (smoothing: {smoothing})')
        criterion = LabelSmoothing(padding_idx, smoothing)

    return criterion


def main():
    """
    Launches data-parallel multi-gpu training.
    """
    training_start = time.time()
    args = parse_args()
    if args.affinity != 'disabled':
        nproc_per_node = torch.cuda.device_count()
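        # NOTE: CPU affinity pinning is currently disabled; the
        # seq2seq.gpu_affinity import at the top of this file is commented
        # out, so only the GPU count is queried here.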
        # affinity = gpu_affinity.set_affinity(
        #     args.local_rank,
        #     nproc_per_node,
        #     args.affinity,
        # )
        # print(f'{args.local_rank}: thread affinity: {affinity}')
    device = utils.set_device(args.cuda, args.local_rank)
    utils.init_distributed(args.cuda)
    args.rank = utils.get_rank()

    if not args.cudnn:
        torch.backends.cudnn.enabled = False

    # create directory for results
    os.makedirs(args.save_dir, exist_ok=True)

    # setup logging
    log_filename = f'log_rank_{utils.get_rank()}.log'
    utils.setup_logging(args.log_all_ranks,
                        os.path.join(args.save_dir, log_filename))

    dllog_file = os.path.join(args.save_dir, args.dllog_file)
    utils.setup_dllogger(enabled=True, filename=dllog_file)

    if args.env:
        utils.log_env_info()

    logging.info(f'Saving results to: {args.save_dir}')
    logging.info(f'Run arguments: {args}')
    dllogger.log(step='PARAMETER', data=vars(args))

    args.train_iter_size = set_iter_size(args.train_iter_size,
                                         args.train_global_batch_size,
                                         args.train_batch_size)

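    # Derive a per-rank worker seed (weight init, dropout) and per-epoch
    # shuffling seeds for the data sampler from the single master seed.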
    worker_seeds, shuffling_seeds = utils.setup_seeds(args.seed,
                                                      args.epochs,
                                                      device)
    worker_seed = worker_seeds[args.rank]
    logging.info(f'Worker {args.rank} is using worker seed: {worker_seed}')
    torch.manual_seed(worker_seed)

    # build tokenizer
    pad_vocab = utils.pad_vocabulary(args.math)
    tokenizer = Tokenizer(args.vocab, args.bpe_codes, args.lang, pad_vocab)

    # build datasets
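    # The training corpus is wrapped in LazyParallelDataset, which defers
    # tokenization instead of processing the whole file up front; validation
    # and test sets are small, so they are fully loaded and sorted by length.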
    train_data = LazyParallelDataset(
        src_fname=args.train_src,
        tgt_fname=args.train_tgt,
        tokenizer=tokenizer,
        min_len=args.train_min_length,
        max_len=args.train_max_length,
        sort=False,
        max_size=args.train_max_size,
        )

    val_data = ParallelDataset(
        src_fname=args.val_src,
        tgt_fname=args.val_tgt,
        tokenizer=tokenizer,
        min_len=args.val_min_length,
        max_len=args.val_max_length,
        sort=True,
        )

    test_data = TextDataset(
        src_fname=args.test_src,
        tokenizer=tokenizer,
        min_len=args.test_min_length,
        max_len=args.test_max_length,
        sort=True,
        )

    vocab_size = tokenizer.vocab_size

    # build GNMT model
    model_config = {'hidden_size': args.hidden_size,
                    'vocab_size': vocab_size,
                    'num_layers': args.num_layers,
                    'dropout': args.dropout,
                    'batch_first': False,
                    'share_embedding': args.share_embedding,
                    }
    model = GNMT(**model_config).to(device)
    logging.info(model)

    batch_first = model.batch_first

    # define loss function (criterion) and optimizer
    criterion = build_criterion(vocab_size, config.PAD,
                                args.smoothing).to(device)

    opt_config = {'optimizer': args.optimizer, 'lr': args.lr}
    opt_config.update(literal_eval(args.optimizer_extra))
    logging.info(f'Training optimizer config: {opt_config}')

    scheduler_config = {'warmup_steps': args.warmup_steps,
                        'remain_steps': args.remain_steps,
                        'decay_interval': args.decay_interval,
                        'decay_steps': args.decay_steps,
                        'decay_factor': args.decay_factor}

    logging.info(f'Training LR schedule config: {scheduler_config}')

    num_parameters = sum(p.nelement() for p in model.parameters())
    logging.info(f'Number of parameters: {num_parameters}')

    batching_opt = {'shard_size': args.shard_size,
                    'num_buckets': args.num_buckets}
    # get data loaders
    train_loader = train_data.get_loader(batch_size=args.train_batch_size,
                                         seeds=shuffling_seeds,
                                         batch_first=batch_first,
                                         shuffle=True,
                                         batching=args.batching,
                                         batching_opt=batching_opt,
                                         num_workers=args.train_loader_workers)

    val_loader = val_data.get_loader(batch_size=args.val_batch_size,
                                     batch_first=batch_first,
                                     shuffle=False,
                                     num_workers=args.val_loader_workers)

    test_loader = test_data.get_loader(batch_size=args.test_batch_size,
                                       batch_first=batch_first,
                                       shuffle=False,
                                       pad=True,
                                       num_workers=args.test_loader_workers)

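    # Beam-search translator, used to compute test-set BLEU after each epoch
    # and for the optional intra-epoch evaluations.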
    translator = Translator(model=model,
                            tokenizer=tokenizer,
                            loader=test_loader,
                            beam_size=args.beam_size,
                            max_seq_len=args.test_max_length,
                            len_norm_factor=args.len_norm_factor,
                            len_norm_const=args.len_norm_const,
                            cov_penalty_factor=args.cov_penalty_factor,
                            print_freq=args.print_freq,
                            reference=args.test_tgt,
                            )

    # create trainer
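    # One optimizer step consumes train_iter_size mini-batches, so the LR
    # scheduler sees len(train_loader) // train_iter_size updates per epoch.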
    total_train_iters = len(train_loader) // args.train_iter_size * args.epochs
    save_info = {
        'model_config': model_config,
        'config': args,
        'tokenizer': tokenizer.get_state()
        }
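    # Dynamic loss-scaling settings used by mixed-precision (fp16) training.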
    loss_scaling = {
        'init_scale': args.init_scale,
        'upscale_interval': args.upscale_interval
        }
    trainer_options = dict(
        model=model,
        criterion=criterion,
        grad_clip=args.grad_clip,
        iter_size=args.train_iter_size,
        save_dir=args.save_dir,
        save_freq=args.save_freq,
        save_info=save_info,
        opt_config=opt_config,
        scheduler_config=scheduler_config,
        train_iterations=total_train_iters,
        keep_checkpoints=args.keep_checkpoints,
        math=args.math,
        loss_scaling=loss_scaling,
        print_freq=args.print_freq,
        intra_epoch_eval=args.intra_epoch_eval,
        translator=translator,
        prealloc_mode=args.prealloc_mode,
        warmup=args.warmup,
        )

    trainer = trainers.Seq2SeqTrainer(**trainer_options)

    # optionally resume from a checkpoint
    if args.resume:
        checkpoint_file = args.resume
        if os.path.isdir(checkpoint_file):
            checkpoint_file = os.path.join(checkpoint_file, 'model_best.pth')
        if os.path.isfile(checkpoint_file):
            trainer.load(checkpoint_file)
        else:
            logging.error(f'No checkpoint found at {args.resume}')

    # training loop
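    # Each epoch: optimize on the training set, optionally validate and
    # checkpoint the best model (rank 0 only), translate the test set, and
    # stop early once --target-bleu is reached.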
    best_loss = float('inf')
    training_perf = []
    break_training = False
    test_bleu = None
    for epoch in range(args.start_epoch, args.epochs):
        logging.info(f'Starting epoch {epoch}')

        train_loader.sampler.set_epoch(epoch)

        trainer.epoch = epoch
        train_loss, train_perf = trainer.optimize(train_loader)
        training_perf.append(train_perf)

        # evaluate on validation set
        if args.eval:
            logging.info('Running validation on dev set')
            val_loss, val_perf = trainer.evaluate(val_loader)

            # remember best validation loss and save checkpoint
            if args.rank == 0:
                is_best = val_loss < best_loss
                best_loss = min(val_loss, best_loss)
                trainer.save(save_all=args.save_all, is_best=is_best)

        if args.eval:
            utils.barrier()
            eval_fname = f'eval_epoch_{epoch}'
            eval_path = os.path.join(args.save_dir, eval_fname)
            _, eval_stats = translator.run(
                calc_bleu=True,
                epoch=epoch,
                eval_path=eval_path,
                )
            test_bleu = eval_stats['bleu']
            if args.target_bleu and test_bleu >= args.target_bleu:
                logging.info('Target accuracy reached')
                break_training = True

        acc_log = []
        acc_log += [f'Summary: Epoch: {epoch}']
        acc_log += [f'Training Loss: {train_loss:.4f}']
        if args.eval:
            acc_log += [f'Validation Loss: {val_loss:.4f}']
            acc_log += [f'Test BLEU: {test_bleu:.2f}']

        perf_log = []
        perf_log += [f'Performance: Epoch: {epoch}']
        perf_log += [f'Training: {train_perf:.0f} Tok/s']
        if args.eval:
            perf_log += [f'Validation: {val_perf:.0f} Tok/s']

        if args.rank == 0:
            logging.info('\t'.join(acc_log))
            logging.info('\t'.join(perf_log))

        logging.info(f'Finished epoch {epoch}')
        if break_training:
            break

    utils.barrier()
    training_stop = time.time()
    training_time = training_stop - training_start
    logging.info(f'Total training time {training_time:.0f} s')

    table = TrainingTable()
    avg_training_perf = sum(training_perf) / len(training_perf)
    table.add(utils.get_world_size(), args.train_batch_size, test_bleu,
              avg_training_perf, training_time)
    if utils.get_rank() == 0:
        table.write('Training Summary', args.math)

    summary = {
        'train_throughput': avg_training_perf,
        'train_elapsed': training_time,
        'test_bleu': test_bleu,
        }
    dllogger.log(step=tuple(), data=summary)

    passed = utils.benchmark(test_bleu, args.target_bleu,
                             train_perf, args.target_perf)
    if not passed:
        sys.exit(1)


if __name__ == '__main__':
    main()
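
# Example launch on a single node with 8 GPUs (dataset path and flag values
# are illustrative, not prescriptive):
#   python -m torch.distributed.launch --nproc_per_node=8 train.py \
#       --dataset-dir data/wmt16_de_en --math fp16 --epochs 6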