train.py 12.2 KB
Newer Older
Myle Ott's avatar
Myle Ott committed
1
#!/usr/bin/env python3 -u
Sergey Edunov's avatar
Sergey Edunov committed
2
3
4
5
6
7
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
Myle Ott's avatar
Myle Ott committed
8
9
10
"""
Train a new model on one or across multiple GPUs.
"""
Sergey Edunov's avatar
Sergey Edunov committed
11

12
13
import collections
import math
Myle Ott's avatar
Myle Ott committed
14
import os
15
16
import random

17
import torch
Sergey Edunov's avatar
Sergey Edunov committed
18

Myle Ott's avatar
Myle Ott committed
19
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
20
from fairseq.data import iterators
21
22
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
Sergey Edunov's avatar
Sergey Edunov committed
23

Myle Ott's avatar
Myle Ott committed
24

25
def main(args, init_distributed=False):
    """Set up the task, model and trainer, then run the training epoch loop.

    Args:
        args: parsed command-line namespace produced by ``options``.
        init_distributed: when True, initialize the distributed backend
            (this process is one rank of a multi-process job).
    """
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    # Pin this process to its GPU and seed RNGs before building anything.
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)

    # Only the master rank verifies that checkpoints can be written.
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)

    print(args)

    # The task defines datasets, dictionaries and batching (e.g. translation).
    task = tasks.setup_task(args)

    # Validation data is loaded eagerly; training data is restored below
    # together with the latest checkpoint.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)

    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('| num. model params: {} (num. trained: {})'.format(
        total_params,
        trainable_params,
    ))

    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Resume from the latest checkpoint (if any) and restore its train iterator.
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)

    # Epoch loop: stop when the LR decays to --min-lr or either the epoch
    # or update budget is exhausted.
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        train(args, trainer, task, epoch_itr)

        if args.disable_validation or epoch_itr.epoch % args.validate_interval != 0:
            valid_losses = [None]
        else:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

        # The scheduler only sees the first validation subset's loss.
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        if epoch_itr.epoch % args.save_interval == 0:
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        # A ':' in the data path indicates sharded data: fetch the iterator
        # for the next epoch's shard now.
        if ':' in getattr(args, 'data', ''):
            epoch_itr = trainer.get_train_iterator(epoch_itr.epoch)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))


Myle Ott's avatar
Myle Ott committed
104
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch, logging stats each step and
    optionally validating/saving every ``--save-interval-updates`` updates."""
    # Gradient-accumulation factor for this epoch; once the configured
    # schedule runs out, the last entry is reused.
    if epoch_itr.epoch <= len(args.update_freq):
        update_freq = args.update_freq[epoch_itr.epoch - 1]
    else:
        update_freq = args.update_freq[-1]

    # Build the (possibly shuffled) batch iterator for this epoch and group
    # batches so each training step consumes `update_freq` of them.
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(AverageMeter)
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    for batch_idx, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            continue

        # Mid-epoch logging.
        stats = get_training_stats(trainer)
        for key, value in log_output.items():
            # Core quantities are already covered by the trainer's meters.
            if key in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue
            if 'loss' in key or key == 'accuracy':
                # Weight loss-like metrics by the sample size.
                extra_meters[key].update(value, log_output['sample_size'])
            else:
                extra_meters[key].update(value)
            stats[key] = extra_meters[key].avg
        progress.log(stats, tag='train', step=stats['num_updates'])

        # The first mini-batch is warm-up; exclude it from words/sec.
        if batch_idx == 0:
            trainer.get_meter('wps').reset()

        num_updates = trainer.get_num_updates()
        mid_epoch_save_due = (
            not args.disable_validation
            and args.save_interval_updates > 0
            and num_updates > 0
            and num_updates % args.save_interval_updates == 0
        )
        if mid_epoch_save_due:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # End-of-epoch logging.
    stats = get_training_stats(trainer)
    for key, meter in extra_meters.items():
        stats[key] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])

    # Clear the per-epoch training meters for the next epoch.
    for name in (
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ):
        meter = trainer.get_meter(name)
        if meter is not None:
            meter.reset()

171
172
173

def get_training_stats(trainer):
    """Assemble the ordered dict of training-side stats for progress logging."""
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('train_loss')
    nll_meter = trainer.get_meter('train_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
        ppl_source = nll_meter
    else:
        # No per-token NLL recorded; derive perplexity from the plain loss.
        ppl_source = trainer.get_meter('train_loss')
    stats['ppl'] = utils.get_perplexity(ppl_source.avg)
    for name in ('wps', 'ups', 'wpb', 'bsz'):
        stats[name] = trainer.get_meter(name)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    for name in ('gnorm', 'clip', 'oom'):
        stats[name] = trainer.get_meter(name)
    loss_scale_meter = trainer.get_meter('loss_scale')
    if loss_scale_meter is not None:
        stats['loss_scale'] = loss_scale_meter
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = trainer.get_meter('train_wall')
    return stats


Myle Ott's avatar
Myle Ott committed
197
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    valid_losses = []
    for subset in subsets:
        # Fresh, unshuffled iterator over this validation subset.
        batch_iterator = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens_valid,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        )
        itr = batch_iterator.next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # Start every subset from clean loss meters.
        for meter_name in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(meter_name)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(AverageMeter)

        for sample in progress:
            log_output = trainer.valid_step(sample)
            for key, value in log_output.items():
                # Core quantities are tracked by the trainer's own meters.
                if key in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[key].update(value)

        # Merge extra meters into the stats dict and print once per subset.
        stats = get_valid_stats(trainer, args, extra_meters)
        for key, meter in extra_meters.items():
            stats[key] = meter.avg
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        # 'loss' is stored as a meter object; other metrics are plain values.
        if args.best_checkpoint_metric == 'loss':
            valid_losses.append(stats[args.best_checkpoint_metric].avg)
        else:
            valid_losses.append(stats[args.best_checkpoint_metric])
    return valid_losses
250
251


252
def get_valid_stats(trainer, args, extra_meters=None):
    """Collect validation stats for logging and best-checkpoint tracking.

    Args:
        trainer: Trainer holding the validation meters.
        args: parsed command-line namespace; ``best_checkpoint_metric`` and
            ``maximize_best_checkpoint_metric`` control how the running best
            value is tracked.
        extra_meters: optional mapping of metric name -> AverageMeter with
            task-specific metrics gathered during validation.

    Returns:
        collections.OrderedDict mapping stat name -> value/meter.

    Raises:
        ValueError: if ``args.best_checkpoint_metric`` is not found in
            ``extra_meters`` or the collected stats.
    """
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('valid_loss')
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        # No per-token NLL recorded; derive perplexity from the plain loss.
        nll_loss = stats['loss']
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    stats['num_updates'] = trainer.get_num_updates()
    # `best` only exists once save_checkpoint has recorded at least one value.
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        key = f'best_{args.best_checkpoint_metric}'
        best_function = max if args.maximize_best_checkpoint_metric else min

        if args.best_checkpoint_metric == 'loss':
            current_metric = stats['loss'].avg
        # Bug fix: extra_meters defaults to None, so the membership test
        # previously raised TypeError for non-'loss' metrics when the
        # argument was omitted; guard against None explicitly.
        elif extra_meters is not None and args.best_checkpoint_metric in extra_meters:
            current_metric = extra_meters[args.best_checkpoint_metric].avg
        elif args.best_checkpoint_metric in stats:
            current_metric = stats[args.best_checkpoint_metric]
        else:
            raise ValueError("best_checkpoint_metric not found in logs")

        stats[key] = best_function(
            checkpoint_utils.save_checkpoint.best,
            current_metric,
        )
    return stats


283
def distributed_main(i, args, start_rank=0):
    """Per-worker entry point for multi-process training.

    Args:
        i: local worker index (also used as the CUDA device id).
        args: parsed command-line namespace, shared across workers.
        start_rank: global rank of this node's first worker; added to ``i``
            to derive the worker's global rank when not already set.
    """
    args.device_id = i
    # distributed_rank is None when launched via torch.multiprocessing.spawn;
    # derive it from the worker index in that case.
    if args.distributed_rank is None:  # torch.multiprocessing.spawn
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
Myle Ott's avatar
Myle Ott committed
288
289


Myle Ott's avatar
Myle Ott committed
290
def cli_main():
    """Parse the training command line and dispatch to the right launcher."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    # Try to infer an init method (e.g. from the environment) if none given.
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # Distributed training with an explicit rendezvous address.
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            # Spawn one child per local GPU; each child derives its own rank.
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
        return

    if args.distributed_world_size > 1:
        # Fallback for a single node with multiple GPUs: rendezvous over a
        # random local TCP port.
        assert args.distributed_world_size <= torch.cuda.device_count()
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
        return

    # Single-process training (one GPU or CPU).
    main(args)
Myle Ott's avatar
Myle Ott committed
325
326
327
328


# Standard script entry point: only run training when executed directly.
if __name__ == '__main__':
    cli_main()