#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

"""
Train a new model on one or across multiple GPUs.
"""

import collections
import itertools
import math
import os
import random
import shutil

import torch

from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter


def main(args, init_distributed=False):
    """Single-process training entry point.

    Args:
        args: parsed command-line arguments (argparse.Namespace).
        init_distributed: when True, initialize torch.distributed from
            ``args`` before training (set by ``distributed_main``).
    """
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)

    # Print args
    print(args)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load dataset splits (train plus every comma-separated validation subset)
    task.load_dataset(args.train_subset, combine=True, epoch=0)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=True, epoch=0)

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))

    # Build trainer
    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Sentence-length limit is the tighter of the task's and the model's limits
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        model.max_positions(),
    )
    # Initialize dataloader
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=args.required_batch_size_multiple,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
        num_workers=args.num_workers,
    )

    # Load the latest checkpoint if one is available
    load_checkpoint(args, trainer, epoch_itr, max_positions, task)

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        train(args, trainer, task, epoch_itr)

        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        # Re-read the train shard for the next epoch when the dataset is sharded
        epoch_itr = reload_train(args, epoch_itr, max_positions, task)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))


def reload_train(args, epoch_itr, max_positions, task):
    """Return a (possibly fresh) train iterator for the next epoch.

    Sharded datasets are given as colon-separated paths in ``args.data``;
    with a single path the dataset is not sharded, so the existing iterator
    is returned unchanged. Otherwise the train split is re-loaded for the
    current epoch and a new batch iterator is built over it.
    """
    is_sharded = "data" in args and len(args.data.split(":")) > 1
    if not is_sharded:
        # nothing needs to be done when the dataset is not sharded.
        return epoch_itr
    print("| Reloading shard of train data at epoch: ", epoch_itr.epoch)
    task.load_dataset(args.train_subset, combine=True, epoch=epoch_itr.epoch)
    return task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=args.required_batch_size_multiple,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
        num_workers=args.num_workers,
        epoch=epoch_itr.epoch,
    )


def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch."""
    # Update parameters every N batches.
    # --update-freq may give one value per epoch; past the end of the list
    # the last entry applies to all remaining epochs.
    update_freq = args.update_freq[epoch_itr.epoch - 1] \
            if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]

    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        # curriculum: keep batches in order for the first `curriculum` epochs
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    # Group `update_freq` batches together so one step accumulates gradients
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    # start= resumes the counter correctly when continuing mid-epoch
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            # step was skipped (e.g. OOM handling inside the trainer)
            continue

        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k:
                # loss-like quantities are weighted by the sample size
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])

        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()

        # mid-epoch validation / checkpointing every --save-interval-updates
        num_updates = trainer.get_num_updates()
        if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])

    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()


def get_training_stats(trainer):
    """Assemble an ordered mapping of training statistics from the trainer's meters."""
    meter = trainer.get_meter
    stats = collections.OrderedDict()
    stats['loss'] = meter('train_loss')
    # Compute perplexity from the NLL meter when it has received updates;
    # otherwise fall back to the plain training loss.
    nll_meter = meter('train_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
    else:
        nll_meter = meter('train_loss')
    stats['ppl'] = get_perplexity(nll_meter.avg)
    for name in ('wps', 'ups', 'wpb', 'bsz'):
        stats[name] = meter(name)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    for name in ('gnorm', 'clip', 'oom'):
        stats[name] = meter(name)
    # loss_scale only exists when training with dynamic loss scaling (fp16)
    loss_scale = meter('loss_scale')
    if loss_scale is not None:
        stats['loss_scale'] = loss_scale
    stats['wall'] = round(meter('wall').elapsed_time)
    stats['train_wall'] = meter('train_wall')
    return stats


def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # reset validation loss meters so stats reflect only this pass
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())

        for sample in progress:
            log_output = trainer.valid_step(sample)

            for k, v in log_output.items():
                # core quantities are tracked by the trainer's own meters
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[k].update(v)

        # log validation stats
        stats = get_valid_stats(trainer)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        valid_losses.append(stats['loss'].avg)
    return valid_losses


def get_valid_stats(trainer):
    """Collect an ordered mapping of validation statistics from the trainer's meters."""
    stats = collections.OrderedDict()
    loss_meter = trainer.get_meter('valid_loss')
    stats['loss'] = loss_meter
    # Prefer the NLL meter for perplexity when it has received updates.
    nll_meter = trainer.get_meter('valid_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
    else:
        nll_meter = loss_meter
    stats['ppl'] = get_perplexity(nll_meter.avg)
    stats['num_updates'] = trainer.get_num_updates()
    # Best (lowest) validation loss seen so far is stored as an attribute
    # on save_checkpoint; report it once at least one checkpoint was saved.
    if hasattr(save_checkpoint, 'best'):
        stats['best_loss'] = min(save_checkpoint.best, stats['loss'].avg)
    return stats


def get_perplexity(loss):
    """Return the base-2 perplexity ``2**loss`` formatted to two decimals.

    Args:
        loss: average (base-2) loss value.

    Returns:
        str: ``2**loss`` formatted as ``'{:.2f}'``, or ``'inf'`` when the
        exponentiation overflows.
    """
    try:
        return '{:.2f}'.format(math.pow(2, loss))
    except OverflowError:
        # Bug fix: previously returned float('inf') here, making the return
        # type inconsistent (str on success, float on overflow). Format the
        # infinity the same way so callers always receive a string.
        return '{:.2f}'.format(float('inf'))


def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Save checkpoint files according to the configured schedule.

    Tracks the best validation loss seen so far on the function attribute
    ``save_checkpoint.best``. Only the master process writes checkpoints.
    """
    if args.no_save or not distributed_utils.is_master(args):
        return

    write_timer = StopwatchMeter()
    write_timer.start()

    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()

    # Map each candidate checkpoint filename to whether it should be written
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and
        epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and
        updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None and
        (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True  # keep this last so that it's a symlink

    # Update the running best validation loss (function attribute)
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if val_loss is not None:
        save_checkpoint.best = min(val_loss, prev_best)
    extra_state = {
        'train_iterator': epoch_itr.state_dict(),
        'val_loss': val_loss,
    }
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})

    # Serialize once, then copy the file for the other requested names
    checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            shutil.copyfile(checkpoints[0], cp)

        write_timer.stop()
        print('| saved checkpoint {} (epoch {} @ {} updates) (writing took {} seconds)'.format(
            checkpoints[0], epoch, updates, write_timer.sum))

    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_utils.checkpoint_paths(
            args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt',
        )
        for old_chk in checkpoints[args.keep_interval_updates:]:
            # lexists so dangling symlinks are removed too
            if os.path.lexists(old_chk):
                os.remove(old_chk)

    if args.keep_last_epochs > 0:
        # remove old epoch checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_utils.checkpoint_paths(
            args.save_dir, pattern=r'checkpoint(\d+)\.pt',
        )
        for old_chk in checkpoints[args.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)


def load_checkpoint(args, trainer, epoch_itr, max_positions, task):
    """Load a checkpoint and replay dataloader to match.

    Returns True when a checkpoint file was found (whether or not extra
    state was restored), False otherwise.
    """
    # Only rank 0 should attempt to create the required dir
    if args.distributed_rank == 0:
        os.makedirs(args.save_dir, exist_ok=True)

    # --restore-file may be absolute or relative to the save directory
    if os.path.isabs(args.restore_file):
        checkpoint_path = args.restore_file
    else:
        checkpoint_path = os.path.join(args.save_dir, args.restore_file)
    if os.path.isfile(checkpoint_path):
        # NOTE(review): eval() on --optimizer-overrides executes arbitrary
        # code from the command line; consider ast.literal_eval — confirm
        # the overrides string is always trusted.
        extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
                                              eval(args.optimizer_overrides))
        if extra_state is not None:
            # replay train iterator to match checkpoint
            epoch_itr_state = extra_state['train_iterator']

            # If the loaded checkpoint is not at epoch 0, reload train dataset,
            # as it could be potentially sharded.
            if epoch_itr_state['epoch'] != 0:
                epoch_itr = reload_train(args, epoch_itr, max_positions, task)

            epoch_itr.load_state_dict(epoch_itr_state)

            print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
                checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))

            # bring the LR scheduler in line with the restored progress
            trainer.lr_step(epoch_itr.epoch)
            trainer.lr_step_update(trainer.get_num_updates())
            if 'best' in extra_state and not args.reset_optimizer:
                save_checkpoint.best = extra_state['best']
        return True
    else:
        print('| no existing checkpoint found {}'.format(checkpoint_path))
    return False


def distributed_main(i, args, start_rank=0):
    """Per-worker entry point for distributed training.

    Args:
        i: index of this worker (used as the local device id).
        args: parsed training arguments; mutated in place.
        start_rank: base rank added to ``i`` when the global rank is unset.
    """
    args.device_id = i
    # Workers launched via torch.multiprocessing.spawn start with no rank
    # assigned; derive the global rank from the worker index in that case.
    rank = args.distributed_rank
    if rank is None:
        rank = start_rank + i
    args.distributed_rank = rank
    main(args, init_distributed=True)


def cli_main():
    """Command-line entry point: parse arguments and dispatch training.

    Chooses between explicit distributed training, single-node multi-GPU
    training via spawned workers, and plain single-GPU training.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        assert args.distributed_world_size <= torch.cuda.device_count()
        # pick a random port for localhost rendezvous between the workers
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)


# Standard script entry point: parse arguments and launch training.
if __name__ == '__main__':
    cli_main()