#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""

import ast
import collections
import itertools
import math
import os
import random

import torch

from fairseq import distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
from fairseq.utils import import_user_module


def main(args, init_distributed=False):
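    """Set up the task, model, and trainer, then run the training loop."""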
    import_user_module(args)

    if args.max_tokens is None:
        args.max_tokens = 6000
    print(args)

    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load dataset splits
    load_dataset_splits(args, task)

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))

    # Make a dummy batch to (i) warm the caching allocator and (ii) act as a
    # placeholder for DistributedDataParallel when there's an uneven number of
    # batches per worker.
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        model.max_positions(),
    )
    dummy_batch = task.dataset(args.train_subset).get_dummy_batch(args.max_tokens, max_positions)
    oom_batch = task.dataset(args.train_subset).get_dummy_batch(1, max_positions)

    # Build trainer
    trainer = Trainer(args, task, model, criterion, dummy_batch, oom_batch)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Initialize dataloader
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=args.required_batch_size_multiple,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
        num_workers=args.num_workers,
    )

    # Initialize distributed training (after data loading)
    if init_distributed:
        import socket
        args.distributed_rank = distributed_utils.distributed_init(args)
        print('| initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))

    # Load the latest checkpoint if one is available
    if not load_checkpoint(args, trainer, epoch_itr):
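        # no checkpoint to resume from: run one dummy forward/backward pass to
        # warm up (e.g. the caching allocator) before real updates begin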
        trainer.dummy_train_step([dummy_batch])

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        train(args, trainer, task, epoch_itr)

        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))


def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch."""
    # Update parameters every N batches
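    # e.g. --update-freq '4,2' accumulates gradients over 4 batches during
    # epoch 1 and over 2 batches in every epoch after that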
    update_freq = (
        args.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(args.update_freq)
        else args.update_freq[-1]
    )

    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
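    # group update_freq consecutive batches per step so the trainer can
    # accumulate gradients across them before each optimizer update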
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(lambda: AverageMeter())
    first_valid = args.valid_subset.split(',')[0]
    max_update = args.max_update or math.inf
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            continue

        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k:
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])

        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()

        num_updates = trainer.get_num_updates()
        if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
            valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])

    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()


def get_training_stats(trainer):
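    """Collect the current training meters into an ordered dict for logging."""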
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('train_loss')
    if trainer.get_meter('train_nll_loss').count > 0:
        nll_loss = trainer.get_meter('train_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = trainer.get_meter('train_loss')
    stats['ppl'] = get_perplexity(nll_loss.avg)
    stats['wps'] = trainer.get_meter('wps')
    stats['ups'] = trainer.get_meter('ups')
    stats['wpb'] = trainer.get_meter('wpb')
    stats['bsz'] = trainer.get_meter('bsz')
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    stats['gnorm'] = trainer.get_meter('gnorm')
    stats['clip'] = trainer.get_meter('clip')
    stats['oom'] = trainer.get_meter('oom')
    if trainer.get_meter('loss_scale') is not None:
        stats['loss_scale'] = trainer.get_meter('loss_scale')
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = trainer.get_meter('train_wall')
    return stats


def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # reset validation loss meters
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())

        for sample in progress:
            log_output = trainer.valid_step(sample)

            for k, v in log_output.items():
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[k].update(v)

        # log validation stats
        stats = get_valid_stats(trainer)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        valid_losses.append(stats['loss'].avg)
    return valid_losses


def get_valid_stats(trainer):
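    """Collect the current validation meters into an ordered dict for logging."""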
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('valid_loss')
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = stats['loss']
    stats['ppl'] = get_perplexity(nll_loss.avg)
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(save_checkpoint, 'best'):
        stats['best_loss'] = min(save_checkpoint.best, stats['loss'].avg)
    return stats


def get_perplexity(loss):
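    """Format perplexity to two decimals, treating ``loss`` as a base-2 log-loss."""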
    try:
        return '{:.2f}'.format(math.pow(2, loss))
    except OverflowError:
        return float('inf')


def save_checkpoint(args, trainer, epoch_itr, val_loss):
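    """Save checkpoints per the --save-interval* flags; only the master worker writes to disk."""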
    if args.no_save or not distributed_utils.is_master(args):
        return

    write_timer = StopwatchMeter()
    write_timer.start()

    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()

    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and
        epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and
        updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None and
        (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True  # keep this last so that it's a symlink

    # track the best validation loss as an attribute on this function; it
    # persists across calls and is stored in the checkpoint's extra_state below
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if val_loss is not None:
        save_checkpoint.best = min(val_loss, prev_best)
    extra_state = {
        'train_iterator': epoch_itr.state_dict(),
        'val_loss': val_loss,
    }
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})

    checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        for cp in checkpoints:
            trainer.save_checkpoint(cp, extra_state)

        # report the saved paths before `checkpoints` is reused by the pruning code below
        write_timer.stop()
        print('| saved checkpoint {} (epoch {} @ {} updates) (writing took {} seconds)'.format(
            checkpoints[0], epoch, updates, write_timer.sum))

    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)

    if args.keep_last_epochs > 0:
        # remove old epoch checkpoints; checkpoints are sorted in descending order
        checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint(\d+)\.pt')
        for old_chk in checkpoints[args.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)


def load_checkpoint(args, trainer, epoch_itr):
    """Load a checkpoint and replay dataloader to match."""
    os.makedirs(args.save_dir, exist_ok=True)
    if os.path.isabs(args.restore_file):
        checkpoint_path = args.restore_file
    else:
        checkpoint_path = os.path.join(args.save_dir, args.restore_file)
    if os.path.isfile(checkpoint_path):
        extra_state = trainer.load_checkpoint(
            checkpoint_path,
            args.reset_optimizer,
            args.reset_lr_scheduler,
            # --optimizer-overrides is a dict literal; parse it without eval()
            ast.literal_eval(args.optimizer_overrides),
        )
        if extra_state is not None:
            # replay train iterator to match checkpoint
            epoch_itr.load_state_dict(extra_state['train_iterator'])

            print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
                checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))

            trainer.lr_step(epoch_itr.epoch)
            trainer.lr_step_update(trainer.get_num_updates())
            if 'best' in extra_state:
                save_checkpoint.best = extra_state['best']
        return True
    else:
        print('| no existing checkpoint found at {}'.format(checkpoint_path))
    return False


def load_dataset_splits(args, task):
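    """Load the training split plus all validation splits.

    Validation data may be sharded across files named e.g. valid, valid1,
    valid2, ...; shards are loaded until one is missing.
    """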
    task.load_dataset(args.train_subset, combine=True)
    for split in args.valid_subset.split(','):
        for k in itertools.count():
            split_k = split + (str(k) if k > 0 else '')
            try:
                task.load_dataset(split_k, combine=False)
            except FileNotFoundError as e:
                if k > 0:
                    break
                raise


def distributed_main(i, args):
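    """Entry point for a single distributed worker (device/process ``i``)."""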
    args.device_id = i
    if args.distributed_rank is None:  # torch.multiprocessing.spawn
        args.distributed_rank = i
    main(args, init_distributed=True)


def cli_main():
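    """Parse command-line args and dispatch to distributed, multi-GPU, or single-GPU training."""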
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args,),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)


if __name__ == '__main__':
    cli_main()