train.py 12.4 KB
Newer Older
Myle Ott's avatar
Myle Ott committed
1
#!/usr/bin/env python3 -u
2
# Copyright (c) Facebook, Inc. and its affiliates.
Sergey Edunov's avatar
Sergey Edunov committed
3
#
4
5
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
Myle Ott's avatar
Myle Ott committed
6
7
8
"""
Train a new model on one or across multiple GPUs.
"""
Sergey Edunov's avatar
Sergey Edunov committed
9

10
11
import collections
import math
12
13
import random

14
import numpy as np
15
import torch
Sergey Edunov's avatar
Sergey Edunov committed
16

Myle Ott's avatar
Myle Ott committed
17
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
18
from fairseq.data import iterators
19
20
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
Sergey Edunov's avatar
Sergey Edunov committed
21

Myle Ott's avatar
Myle Ott committed
22

23
def main(args, init_distributed=False):
    """Set up task, model, and trainer from ``args`` and run the training loop.

    Args:
        args: parsed command-line namespace (from ``options.get_training_parser``).
        init_distributed: when True, initialize the distributed process group
            via ``distributed_utils.distributed_init`` before training.
    """
    # Import any user-supplied module (e.g. custom tasks/models) before setup.
    utils.import_user_module(args)

    # A batch size bound is required to build the data iterators.
    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    # Seed numpy and torch for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)

    # Only the master rank checks that the checkpoint directory is writable.
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)

    # Print args
    print(args)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))

    # Build trainer
    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    # Loop until the lr schedule bottoms out, the epoch budget is spent
    # (allowing one extra pass at max_epoch while a queued next-epoch
    # iterator remains), or the update budget is spent.
    while (
        lr > args.min_lr
        and (epoch_itr.epoch < max_epoch or (epoch_itr.epoch == max_epoch
            and epoch_itr._next_epoch_itr is not None))
        and trainer.get_num_updates() < max_update
    ):
        # train for one epoch
        train(args, trainer, task, epoch_itr)

        if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        # A ':' in args.data signals sharded data, which must be re-loaded
        # from disk for each new epoch.
        reload_dataset = ':' in getattr(args, 'data', '')
        # sharded data: get train iterator for next epoch
        epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))


Myle Ott's avatar
Myle Ott committed
107
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch."""
    # Update parameters every N batches; --update-freq may give a per-epoch
    # schedule, with the last entry reused once the schedule is exhausted.
    update_freq = args.update_freq[epoch_itr.epoch - 1] \
        if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]

    # Initialize data iterator; shuffling is disabled during curriculum epochs.
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    # Group `update_freq` batches together so each step accumulates gradients.
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    # Meters for any extra metrics reported by the criterion/task.
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    # Start counting from iterations_in_epoch so mid-epoch resumes are correct.
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            # train_step returned nothing to log (e.g. the step was skipped).
            continue

        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k or k == 'accuracy':
                # Weight loss-like metrics by sample size for a correct average.
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])

        # ignore the first mini-batch in words-per-second and updates-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()
            trainer.get_meter('ups').reset()

        num_updates = trainer.get_num_updates()
        # Optionally validate and checkpoint every --save-interval-updates steps.
        if (
            not args.disable_validation
            and args.save_interval_updates > 0
            and num_updates % args.save_interval_updates == 0
            and num_updates > 0
        ):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])

    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()

175
176
177

def get_training_stats(trainer):
    """Collect an ordered mapping of training statistics from the trainer.

    Values are mostly meter objects read live from the trainer; ``ppl`` is a
    scalar derived from the NLL meter (or the plain loss meter when no NLL
    values were recorded).
    """
    get_meter = trainer.get_meter
    stats = collections.OrderedDict()
    stats['loss'] = get_meter('train_loss')
    # Prefer the dedicated NLL meter for perplexity when it was updated;
    # otherwise fall back to the overall training loss.
    nll_meter = get_meter('train_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
        nll_loss = nll_meter
    else:
        nll_loss = get_meter('train_loss')
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    # Throughput and batch-shape meters.
    for name in ('wps', 'ups', 'wpb', 'bsz'):
        stats[name] = get_meter(name)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    # Gradient/optimization meters.
    for name in ('gnorm', 'clip', 'oom'):
        stats[name] = get_meter(name)
    # The loss-scale meter may be absent; only report it when present.
    loss_scale_meter = get_meter('loss_scale')
    if loss_scale_meter is not None:
        stats['loss_scale'] = loss_scale_meter
    stats['wall'] = round(get_meter('wall').elapsed_time)
    stats['train_wall'] = get_meter('train_wall')
    return stats


Myle Ott's avatar
Myle Ott committed
201
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""

    if args.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(args.fixed_validation_seed)

    valid_losses = []
    for subset in subsets:
        # Initialize data iterator for this validation subset; sharded across
        # distributed workers and never shuffled.
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens_valid,
            max_sentences=args.max_sentences_valid,
            # Cap positions at the tighter of the task's and model's limits.
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # reset validation loss meters so stats reflect only this pass
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        # Meters for any extra metrics reported by valid_step.
        extra_meters = collections.defaultdict(lambda: AverageMeter())

        for sample in progress:
            log_output = trainer.valid_step(sample)

            for k, v in log_output.items():
                # Standard keys are already tracked by the trainer's meters.
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[k].update(v)

        # log validation stats
        stats = get_valid_stats(trainer, args, extra_meters)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        # 'loss' is stored as a meter (take .avg); other metrics are scalars.
        valid_losses.append(
            stats[args.best_checkpoint_metric].avg
            if args.best_checkpoint_metric == 'loss'
            else stats[args.best_checkpoint_metric]
        )
    return valid_losses
259
260


261
def get_valid_stats(trainer, args, extra_meters=None):
    """Build an ordered dict of validation statistics from the trainer's meters.

    Args:
        trainer: trainer whose ``valid_loss``/``valid_nll_loss`` meters were
            populated by the validation pass.
        args: parsed command-line namespace; ``best_checkpoint_metric`` selects
            which statistic is tracked as the running best.
        extra_meters: optional mapping of metric name -> AverageMeter collected
            during validation, consulted when the best-checkpoint metric is not
            one of the standard stats.

    Returns:
        collections.OrderedDict of stat name -> meter or scalar value.

    Raises:
        ValueError: if ``args.best_checkpoint_metric`` is found neither in the
            stats nor in ``extra_meters``.
    """
    if extra_meters is None:
        # Bug fix: with the default of None, the membership test
        # `args.best_checkpoint_metric in extra_meters` below would raise
        # TypeError whenever the metric is not 'loss'. Treat None as empty.
        extra_meters = {}
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('valid_loss')
    # Prefer the NLL meter for perplexity when it recorded any values.
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = stats['loss']
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    stats['num_updates'] = trainer.get_num_updates()
    # Track the best value of the configured metric, but only once a previous
    # best exists (the attribute is set by checkpoint_utils.save_checkpoint).
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        key = 'best_{0}'.format(args.best_checkpoint_metric)
        best_function = max if args.maximize_best_checkpoint_metric else min

        # Resolve the current value of the metric: 'loss' is a meter (take
        # .avg), extra metrics come from their meters, anything else must
        # already be a scalar in stats.
        current_metric = None
        if args.best_checkpoint_metric == 'loss':
            current_metric = stats['loss'].avg
        elif args.best_checkpoint_metric in extra_meters:
            current_metric = extra_meters[args.best_checkpoint_metric].avg
        elif args.best_checkpoint_metric in stats:
            current_metric = stats[args.best_checkpoint_metric]
        else:
            raise ValueError("best_checkpoint_metric not found in logs")

        stats[key] = best_function(
            checkpoint_utils.save_checkpoint.best,
            current_metric,
        )
    return stats


292
def distributed_main(i, args, start_rank=0):
    """Worker entry point for multiprocess training.

    Binds this process to device ``i`` and, when launched through
    ``torch.multiprocessing.spawn`` (signalled by ``args.distributed_rank``
    being None), derives the global rank as ``start_rank + i`` before
    delegating to :func:`main` with distributed initialization enabled.
    """
    args.device_id = i
    if args.distributed_rank is None:  # spawned worker: derive rank from index
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
Myle Ott's avatar
Myle Ott committed
297
298


Myle Ott's avatar
Myle Ott committed
299
def cli_main():
    """Parse command-line arguments and dispatch to the right training mode."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    # Try to infer an init method (e.g. from environment) if none was given.
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            # Spawn one worker process per visible GPU on this node.
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            # Single process in the distributed job; run it directly.
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        assert args.distributed_world_size <= torch.cuda.device_count()
        # Pick a random port for the local TCP rendezvous.
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
Myle Ott's avatar
Myle Ott committed
334
335
336
337


# Standard script entry point: parse CLI arguments and launch training.
if __name__ == '__main__':
    cli_main()