# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pretrain utilities."""

from datetime import datetime
import math
import sys
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP

from megatron import get_args
from megatron import get_timers
from megatron import get_tensorboard_writer
from megatron import get_current_global_batch_size
from megatron import get_num_microbatches
from megatron import is_last_rank
from megatron import update_num_microbatches
from megatron import mpu
from megatron import print_rank_0
from megatron import print_rank_last
from megatron.checkpointing import load_checkpoint
from megatron.checkpointing import save_checkpoint
from megatron.model import Float16Module
from megatron.optimizer import get_megatron_optimizer
from megatron.initialize import initialize_megatron
from megatron.initialize import write_args_to_tensorboard
from megatron.learning_rates import AnnealingLR
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.utils import check_adlr_autoresume_termination
from megatron.utils import unwrap_model
from megatron.data.data_samplers import build_pretraining_data_loader
from megatron.utils import calc_params_l2_norm
from megatron.schedules import get_forward_backward_func
from megatron.utils import report_memory


def print_datetime(string):
    """Note that this call will sync across all ranks."""
    torch.distributed.barrier()
    time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0('[' + string + '] datetime: {} '.format(time_str))


def pretrain(train_valid_test_dataset_provider,
             model_provider,
             forward_step_func,
             extra_args_provider=None,
             args_defaults={}):
    """Main training program.

    This function will run the following in the order provided:
        1) initialize Megatron.
        2) setup model, optimizer and lr schedule using the model_provider.
        3) call train_valid_test_dataset_provider to get train/val/test datasets.
        4) train the model using the forward_step_func.

    Arguments:
        train_valid_test_dataset_provider: a function that takes the size of
            train/valid/test dataset and returns `train, valid, test` datasets.
        model_provider: a function that returns a vanilla version of the
            model. By vanilla we mean a simple model on cpu with no fp16 or ddp.
        forward_step_func: a function that takes a `data iterator` and `model`,
            and returns a `loss` scalar and a dictionary whose key:value pairs
            are the info we would like to monitor during training, for example
            `lm-loss: value`. We also require that this function adds
            `batch generator` to the timers class.
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
        args_defaults: a dictionary from argument-name to argument-value. It
            is used to set defaults for already-parsed arguments.
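
    Example (a minimal sketch; the provider functions below are hypothetical
    placeholders that a pretraining script would define):

        def train_valid_test_datasets_provider(train_val_test_num_samples):
            ...  # build and return (train dataset, valid dataset, test dataset)

        def model_provider(pre_process=True, post_process=True):
            ...  # build and return the bare model

        def forward_step(data_iterator, model):
            ...  # run a forward pass and return the loss and a stats dict

        pretrain(train_valid_test_datasets_provider, model_provider,
                 forward_step)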
    """

    # Initialize and get arguments, timers, and Tensorboard writer.
    initialize_megatron(extra_args_provider=extra_args_provider,
                        args_defaults=args_defaults)

    # Adjust the startup time so it reflects the largest value.
    # This will be closer to what scheduler will see (outside of
    # image ... launches.
    global _TRAIN_START_TIME
    start_time_tensor = torch.cuda.DoubleTensor([_TRAIN_START_TIME])
    torch.distributed.all_reduce(start_time_tensor,
                                 op=torch.distributed.ReduceOp.MIN)
    _TRAIN_START_TIME = start_time_tensor.item()
    print_rank_0('time to initialize megatron (seconds): {:.3f}'.format(
        time.time() - _TRAIN_START_TIME))
    print_datetime('after megatron is initialized')

    args = get_args()
    timers = get_timers()

    # Model, optimizer, and learning rate.
    timers('model-and-optimizer-setup').start()
    model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider)
    timers('model-and-optimizer-setup').stop()
    print_datetime('after model, optimizer, and learning rate '
                   'scheduler are built')

    # Build train/valid/test data iterators.
    timers('train/valid/test-data-iterators-setup').start()
    if args.virtual_pipeline_model_parallel_size is not None:
        all_data_iterators = [
            build_train_valid_test_data_iterators(train_valid_test_dataset_provider)
            for _ in range(len(model))
        ]
        train_data_iterator = [data_iterators[0] for data_iterators in all_data_iterators]
        valid_data_iterator = [data_iterators[1] for data_iterators in all_data_iterators]
        test_data_iterator = [data_iterators[2] for data_iterators in all_data_iterators]
    else:
        train_data_iterator, valid_data_iterator, test_data_iterator \
            = build_train_valid_test_data_iterators(
                train_valid_test_dataset_provider)
    timers('train/valid/test-data-iterators-setup').stop()
    print_datetime('after dataloaders are built')

    # Print setup timing.
    print_rank_0('done with setup ...')
    timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'])
    print_rank_0('training ...')

    iteration = 0
    if args.do_train and args.train_iters > 0:
        iteration = train(forward_step_func,
                          model, optimizer, lr_scheduler,
                          train_data_iterator, valid_data_iterator)
    print_datetime('after training is done')

    if args.do_valid:
        prefix = 'the end of training for val data'
        evaluate_and_print_results(prefix, forward_step_func,
                                   valid_data_iterator, model,
                                   iteration, False)

    if args.save and iteration != 0:
        save_checkpoint(iteration, model, optimizer, lr_scheduler)

    if args.do_test:
        # Run on test data.
        prefix = 'the end of training for test data'
        evaluate_and_print_results(prefix, forward_step_func,
                                   test_data_iterator, model,
                                   0, True)

def update_train_iters(args):

    # For iteration-based training, we don't need to do anything
    if args.train_iters:
        return

    # Constant batch size with sample-based training.
    if args.rampup_batch_size is None:
        args.train_iters = args.train_samples // args.global_batch_size

    else:
        # Sample based training with rampup batch size.
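        # args.rampup_batch_size is assumed to hold
        # [start batch size, batch size increment, ramp-up samples];
        # index 2 is the number of samples over which to ramp up.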
        iterations = 0
        consumed_samples = 0
        # Rampup phase.
        while consumed_samples <= int(args.rampup_batch_size[2]):
            update_num_microbatches(consumed_samples, consistency_check=False)
            consumed_samples += get_current_global_batch_size()
            iterations += 1
        # Reset
        update_num_microbatches(0, consistency_check=False)
        # Constant phase
        # Note that we throw away any partial last batch.
        iterations += (args.train_samples - consumed_samples) // \
                      args.global_batch_size
        args.train_iters = iterations

    print_rank_0('setting training iterations to {}'.format(args.train_iters))


def get_model(model_provider_func):
    """Build the model."""
    args = get_args()

    # Build model.
    if mpu.get_pipeline_model_parallel_world_size() > 1 and \
       args.virtual_pipeline_model_parallel_size is not None:
        model = []
        for i in range(args.virtual_pipeline_model_parallel_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            this_model = model_provider_func(
                pre_process=pre_process,
                post_process=post_process
            )
            model.append(this_model)
    else:
        pre_process = mpu.is_pipeline_first_stage()
        post_process = mpu.is_pipeline_last_stage()
        model = model_provider_func(
            pre_process=pre_process,
            post_process=post_process
        )

    if not isinstance(model, list):
        model = [model]

    # Set tensor model parallel attributes if not set.
    # Only parameters that are already tensor model parallel have these
    # attributes set for them. We should make sure the default attributes
    # are set for all params so the optimizer can use them.
    for model_module in model:
        for param in model_module.parameters():
            mpu.set_defaults_if_not_set_tensor_model_parallel_attributes(param)

    # Print number of parameters.
    if mpu.get_data_parallel_rank() == 0:
        print(' > number of parameters on (tensor, pipeline) '
              'model parallel rank ({}, {}): {}'.format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank(),
            sum([sum([p.nelement() for p in model_module.parameters()])
                 for model_module in model])), flush=True)

    # GPU allocation.
    for model_module in model:
        model_module.cuda(torch.cuda.current_device())

    # Fp16 conversion.
    if args.fp16 or args.bf16:
        model = [Float16Module(model_module, args) for model_module in model]

    if args.DDP_impl == 'torch':
        i = torch.cuda.current_device()
        model = [torchDDP(model_module, device_ids=[i], output_device=i,
                          process_group=mpu.get_data_parallel_group())
                 for model_module in model]
        return model

    if args.DDP_impl == 'local':
        model = [LocalDDP(model_module,
                          args.accumulate_allreduce_grads_in_fp32,
                          args.use_contiguous_buffers_in_local_ddp)
                 for model_module in model]
        return model

    raise NotImplementedError('Unknown DDP implementation specified: {}. '
                              'Exiting.'.format(args.DDP_impl))


def get_learning_rate_scheduler(optimizer):
    """Build the learning rate scheduler."""
    args = get_args()

    # Iteration-based training.
    if args.train_iters:
        if args.lr_decay_iters is None:
            args.lr_decay_iters = args.train_iters
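        # The scheduler counts steps in samples, so convert the decay
        # horizon from iterations to samples.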
        decay_steps = args.lr_decay_iters * args.global_batch_size
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_iters * args.global_batch_size
    # Sample-based training.
    elif args.train_samples:
        # We need to set training iters for later use. Technically
        # we need to adjust the training samples too (due to last
        # batch being incomplete) but we leave it as is for now.
        update_train_iters(args)
        if args.lr_decay_samples is None:
            args.lr_decay_samples = args.train_samples
        decay_steps = args.lr_decay_samples
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_samples
    else:
        raise Exception(
            'either train-iters or train-samples should be provided.')

    lr_scheduler = AnnealingLR(
        optimizer,
        max_lr=args.lr,
        min_lr=args.min_lr,
        warmup_steps=warmup_steps,
        decay_steps=decay_steps,
        decay_style=args.lr_decay_style,
        use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler,
        override_lr_scheduler=args.override_lr_scheduler)

    return lr_scheduler


def setup_model_and_optimizer(model_provider_func):
    """Setup model and optimizer."""
    args = get_args()

    model = get_model(model_provider_func)

    unwrapped_model = unwrap_model(model,
                                   (torchDDP, LocalDDP, Float16Module))
    optimizer = get_megatron_optimizer(unwrapped_model)

    lr_scheduler = get_learning_rate_scheduler(optimizer)

    if args.load is not None:
        timers = get_timers()
        # Extra barrier is added to make sure all ranks report the
        # max time.
        torch.distributed.barrier()
        timers('load-checkpoint').start()
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler)
        torch.distributed.barrier()
        timers('load-checkpoint').stop()
        timers.log(['load-checkpoint'])
    else:
        args.iteration = 0

    # We only support local DDP with multiple micro-batches.
    if len(model) > 1 or mpu.get_pipeline_model_parallel_world_size() > 1:
        assert args.DDP_impl == 'local'

    # get model without FP16 and/or TorchDDP wrappers
    if args.iteration == 0 and len(unwrapped_model) == 1 \
        and hasattr(unwrapped_model[0], 'init_state_dict_from_bert'):
        print_rank_0("Initializing ICT from pretrained BERT model")
        unwrapped_model[0].init_state_dict_from_bert()
        if args.fp16:
            optimizer.reload_model_params()

    return model, optimizer, lr_scheduler


def train_step(forward_step_func, data_iterator,
               model, optimizer, lr_scheduler):
    """Single training step."""
    args = get_args()
    timers = get_timers()

    # Set grad to zero.
    if args.DDP_impl == 'local' and args.use_contiguous_buffers_in_local_ddp:
        for partition in model:
            partition.zero_grad_buffer()
    optimizer.zero_grad()

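    # The returned schedule (no pipelining, 1F1B, or interleaved) depends
    # on the pipeline-parallel configuration.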
    forward_backward_func = get_forward_backward_func()
    losses_reduced = forward_backward_func(
        forward_step_func, data_iterator, model,
        optimizer, timers, forward_only=False)

    # Empty unused memory
    if args.empty_unused_memory_level >= 1:
        torch.cuda.empty_cache()

    # All-reduce if needed.
    if args.DDP_impl == 'local':
        timers('backward-params-all-reduce').start()
        for model_module in model:
            model_module.allreduce_gradients()
        timers('backward-params-all-reduce').stop()

    # All-reduce word_embeddings' grad across first and last stages to ensure
    # that word_embeddings parameters stay in sync.
    # This should only run for models that support pipelined model parallelism
    # (BERT and GPT-2).
    timers('backward-embedding-all-reduce').start()
    if (mpu.is_pipeline_first_stage(ignore_virtual=True) or
        mpu.is_pipeline_last_stage(ignore_virtual=True)) and \
            mpu.get_pipeline_model_parallel_world_size() > 1:
        if mpu.is_pipeline_first_stage(ignore_virtual=True):
            unwrapped_model = model[0]
        elif mpu.is_pipeline_last_stage(ignore_virtual=True):
            unwrapped_model = model[-1]
        unwrapped_model = unwrap_model(
            unwrapped_model, (torchDDP, LocalDDP, Float16Module))

        if unwrapped_model.share_word_embeddings:
            word_embeddings_weight = unwrapped_model.word_embeddings_weight()
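            # Local DDP accumulates gradients into each param's main_grad
            # buffer; torch DDP leaves them in .grad.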
            if args.DDP_impl == 'local':
                grad = word_embeddings_weight.main_grad
            else:
                grad = word_embeddings_weight.grad
            torch.distributed.all_reduce(grad, group=mpu.get_embedding_group())
    timers('backward-embedding-all-reduce').stop()

    # Update parameters.
    timers('optimizer').start()
    update_successful, grad_norm, num_zeros_in_grad = optimizer.step()
    timers('optimizer').stop()

    # Update learning rate.
    if update_successful:
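        # One scheduler step per iteration, counted in samples: the
        # increment below equals the global batch size.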
        increment = get_num_microbatches() * \
                    args.micro_batch_size * \
                    args.data_parallel_size
        lr_scheduler.step(increment=increment)
        skipped_iter = 0
    else:
        skipped_iter = 1

    # Empty unused memory
    if args.empty_unused_memory_level >= 2:
        torch.cuda.empty_cache()

    if mpu.is_pipeline_last_stage(ignore_virtual=True):
        # Average loss across microbatches.
        loss_reduced = {}
        for key in losses_reduced[0]:
            losses_reduced_for_key = [x[key] for x in losses_reduced]
            loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
        return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
    return {}, skipped_iter, grad_norm, num_zeros_in_grad


def training_log(loss_dict, total_loss_dict, learning_rate, iteration,
                 loss_scale, report_memory_flag, skipped_iter,
                 grad_norm, params_norm, num_zeros_in_grad):
    """Log training information such as losses, timing, ...."""
    args = get_args()
    timers = get_timers()
    writer = get_tensorboard_writer()

    # Advanced, skipped, and NaN iterations.
    advanced_iters_key = 'advanced iterations'
    skipped_iters_key = 'skipped iterations'
    nan_iters_key = 'nan iterations'
    # Advanced iterations.
    if not skipped_iter:
        total_loss_dict[advanced_iters_key] = total_loss_dict.get(
            advanced_iters_key, 0) + 1
    else:
        if advanced_iters_key not in total_loss_dict:
            total_loss_dict[advanced_iters_key] = 0
    # Skipped iterations.
    total_loss_dict[skipped_iters_key] = total_loss_dict.get(
        skipped_iters_key, 0) + skipped_iter
    # Update losses and set NaN iterations.
    got_nan = False
    for key in loss_dict:
        if not skipped_iter:
            total_loss_dict[key] = total_loss_dict.get(
                key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]
        else:
            value = loss_dict[key].float().sum().item()
            is_nan = value == float('inf') or \
                     value == -float('inf') or \
                     value != value
            got_nan = got_nan or is_nan
    total_loss_dict[nan_iters_key] = total_loss_dict.get(
        nan_iters_key, 0) + int(got_nan)

    # Logging.
    timers_to_log = []

    def add_to_logging(name):
        if name in timers.timers:
            timers_to_log.append(name)
    add_to_logging('forward-compute')
    add_to_logging('forward-recv')
    add_to_logging('forward-send')
    add_to_logging('forward-backward-send-forward-backward-recv')
    add_to_logging('backward-compute')
    add_to_logging('backward-recv')
    add_to_logging('backward-send')
    add_to_logging('backward-send-forward-recv')
    add_to_logging('backward-send-backward-recv')
    add_to_logging('backward-params-all-reduce')
    add_to_logging('backward-embedding-all-reduce')
    add_to_logging('optimizer-copy-to-main-grad')
    add_to_logging('optimizer-unscale-and-check-inf')
    add_to_logging('optimizer-clip-main-grad')
    add_to_logging('optimizer-copy-main-to-model-params')
    add_to_logging('optimizer')
    add_to_logging('batch-generator')

    # Calculate batch size.
    batch_size = args.micro_batch_size * args.data_parallel_size * \
        get_num_microbatches()

    total_iterations = total_loss_dict[advanced_iters_key] + \
                       total_loss_dict[skipped_iters_key]

    # Tensorboard values.
    if writer and (iteration % args.tensorboard_log_interval == 0) and \
       is_last_rank():
        if args.log_learning_rate_to_tensorboard:
            writer.add_scalar('learning-rate', learning_rate, iteration)
            writer.add_scalar('learning-rate vs samples', learning_rate,
                              args.consumed_train_samples)
        if args.log_batch_size_to_tensorboard:
            writer.add_scalar('batch-size', batch_size, iteration)
            writer.add_scalar('batch-size vs samples', batch_size,
                              args.consumed_train_samples)
        for key in loss_dict:
            writer.add_scalar(key, loss_dict[key], iteration)
            writer.add_scalar(key + ' vs samples', loss_dict[key],
                              args.consumed_train_samples)
        if args.log_loss_scale_to_tensorboard:
            writer.add_scalar('loss-scale', loss_scale, iteration)
            writer.add_scalar('loss-scale vs samples', loss_scale,
                              args.consumed_train_samples)
        if grad_norm is not None:
            writer.add_scalar('grad-norm', grad_norm, iteration)
            writer.add_scalar('grad-norm vs samples', grad_norm,
                              args.consumed_train_samples)
        if num_zeros_in_grad is not None:
            writer.add_scalar('num-zeros', num_zeros_in_grad, iteration)
            writer.add_scalar('num-zeros vs samples', num_zeros_in_grad,
                              args.consumed_train_samples)
        if params_norm is not None:
            writer.add_scalar('params-norm', params_norm, iteration)
            writer.add_scalar('params-norm vs samples', params_norm,
                              args.consumed_train_samples)
        if args.log_timers_to_tensorboard:
            timers.write(timers_to_log, writer, iteration,
                         normalizer=total_iterations)
        if args.log_memory_to_tensorboard:
            mem_stats = torch.cuda.memory_stats()
            writer.add_scalar(
                "mem-reserved-bytes",
                mem_stats["reserved_bytes.all.current"],
                iteration,
            )
            writer.add_scalar(
                "mem-allocated-bytes",
                mem_stats["allocated_bytes.all.current"],
                iteration,
            )
            writer.add_scalar(
                "mem-allocated-count",
                mem_stats["allocation.all.current"],
                iteration,
            )

    if iteration % args.log_interval == 0:
        elapsed_time = timers('interval-time').elapsed()
        elapsed_time_per_iteration = elapsed_time / total_iterations
        if writer:
            if args.log_timers_to_tensorboard:
                writer.add_scalar('iteration-time',
                                  elapsed_time_per_iteration, iteration)
        log_string = ' iteration {:8d}/{:8d} |'.format(
            iteration, args.train_iters)
        log_string += ' consumed samples: {:12d} |'.format(
            args.consumed_train_samples)
        log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(
            elapsed_time_per_iteration * 1000.0)
        log_string += ' learning rate: {:.3E} |'.format(learning_rate)
        log_string += ' global batch size: {:5d} |'.format(batch_size)
        for key in total_loss_dict:
            if key not in [advanced_iters_key, skipped_iters_key,
                           nan_iters_key]:
                avg = total_loss_dict[key].item() / \
                      float(max(1, total_loss_dict[advanced_iters_key]))
                if avg > 0.0:
                    log_string += ' {}: {:.6E} |'.format(key, avg)
                total_loss_dict[key] = torch.cuda.FloatTensor([0.0])
        log_string += ' loss scale: {:.1f} |'.format(loss_scale)
        if grad_norm is not None:
            log_string += ' grad norm: {:.3f} |'.format(grad_norm)
        if num_zeros_in_grad is not None:
            log_string += ' num zeros: {:.1f} |'.format(num_zeros_in_grad)
        if params_norm is not None:
            log_string += ' params norm: {:.3f} |'.format(params_norm)
        log_string += ' number of skipped iterations: {:3d} |'.format(
            total_loss_dict[skipped_iters_key])
        log_string += ' number of nan iterations: {:3d} |'.format(
            total_loss_dict[nan_iters_key])
        total_loss_dict[advanced_iters_key] = 0
        total_loss_dict[skipped_iters_key] = 0
        total_loss_dict[nan_iters_key] = 0
        print_rank_last(log_string)
        if report_memory_flag and learning_rate > 0.:
            # Report memory after optimizer state has been initialized.
            report_memory('(after {} iterations)'.format(iteration))
            report_memory_flag = False
        timers.log(timers_to_log, normalizer=args.log_interval)

    return report_memory_flag


def save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler):
    timers = get_timers()
    # Extra barrier is added to make sure
    # all ranks report the max time.
    torch.distributed.barrier()
    timers('save-checkpoint').start()
    save_checkpoint(iteration, model, optimizer, lr_scheduler)
    torch.distributed.barrier()
    timers('save-checkpoint').stop()
    timers.log(['save-checkpoint'])


def train(forward_step_func, model, optimizer, lr_scheduler,
          train_data_iterator, valid_data_iterator):
    """Train the model function."""
    args = get_args()
    timers = get_timers()

    # Write args to tensorboard
    write_args_to_tensorboard()

    # Turn on training mode which enables dropout.
    for model_module in model:
        model_module.train()

    # Tracking loss.
    total_loss_dict = {}

    # Iterations.
    iteration = args.iteration

    timers('interval-time').start()
    print_datetime('before the start of training step')
    report_memory_flag = True
    while iteration < args.train_iters:
        update_num_microbatches(args.consumed_train_samples)
        loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \
            train_step(forward_step_func,
                       train_data_iterator,
                       model,
                       optimizer,
                       lr_scheduler)
        iteration += 1
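        # Bump the sample counter by the global batch size:
        # data-parallel size * micro batch size * number of microbatches.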
        args.consumed_train_samples += mpu.get_data_parallel_world_size() * \
                                       args.micro_batch_size * \
                                       get_num_microbatches()

        # Logging.
        loss_scale = optimizer.get_loss_scale().item()
        params_norm = None
        if args.log_params_norm:
            params_norm = calc_params_l2_norm(model)
        report_memory_flag = training_log(loss_dict, total_loss_dict,
                                          optimizer.param_groups[0]['lr'],
                                          iteration, loss_scale,
                                          report_memory_flag, skipped_iter,
                                          grad_norm, params_norm, num_zeros_in_grad)

        # Autoresume
        if args.adlr_autoresume and \
           (iteration % args.adlr_autoresume_interval == 0):
            check_adlr_autoresume_termination(iteration, model, optimizer,
                                              lr_scheduler)

        # Evaluation
        if args.eval_interval and iteration % args.eval_interval == 0 and \
           args.do_valid:
            prefix = 'iteration {}'.format(iteration)
            evaluate_and_print_results(prefix, forward_step_func,
                                       valid_data_iterator, model,
                                       iteration, False)

        # Checkpointing
        saved_checkpoint = False
        if args.save and args.save_interval and \
           iteration % args.save_interval == 0:
            save_checkpoint_and_time(iteration, model, optimizer,
                                     lr_scheduler)
            saved_checkpoint = True

        # Exiting based on duration
        if args.exit_duration_in_mins:
            train_time = (time.time() - _TRAIN_START_TIME) / 60.0
            done_cuda = torch.cuda.IntTensor(
                [train_time > args.exit_duration_in_mins])
            torch.distributed.all_reduce(
                done_cuda, op=torch.distributed.ReduceOp.MAX)
            done = done_cuda.item()
            if done:
                if not saved_checkpoint:
                    save_checkpoint_and_time(iteration, model, optimizer,
                                             lr_scheduler)
                print_datetime('exiting program after {} minutes'.format(train_time))
                sys.exit()

        # Exiting based on iterations
        if args.exit_interval and iteration % args.exit_interval == 0:
            if not saved_checkpoint:
                save_checkpoint_and_time(iteration, model, optimizer,
                                         lr_scheduler)
            torch.distributed.barrier()
            print_datetime('exiting program at iteration {}'.format(iteration))
            sys.exit()

    return iteration


def evaluate(forward_step_func, data_iterator, model, verbose=False):
    """Evaluation."""
    args = get_args()

    # Turn on evaluation mode which disables dropout.
    for model_module in model:
        model_module.eval()

    total_loss_dict = {}

    with torch.no_grad():
        iteration = 0
        while iteration < args.eval_iters:
            iteration += 1
            if verbose and iteration % args.log_interval == 0:
                print_rank_0('Evaluating iter {}/{}'.format(iteration,
                                                            args.eval_iters))

            forward_backward_func = get_forward_backward_func()
            loss_dicts = forward_backward_func(
                forward_step_func, data_iterator, model, optimizer=None,
                timers=None, forward_only=True)

            # Empty unused memory
            if args.empty_unused_memory_level >= 1:
                torch.cuda.empty_cache()

            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                # Reduce across processes.
                for loss_dict in loss_dicts:
                    for key in loss_dict:
                        total_loss_dict[key] = total_loss_dict.get(
                            key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]

            args.consumed_valid_samples += mpu.get_data_parallel_world_size() \
                                           * args.micro_batch_size \
                                           * get_num_microbatches()
    # Move model back to the train mode.
    for model_module in model:
        model_module.train()

    for key in total_loss_dict:
        total_loss_dict[key] /= args.eval_iters * get_num_microbatches()

    return total_loss_dict


def evaluate_and_print_results(prefix, forward_step_func,
                               data_iterator, model,
                               iteration, verbose=False):
    """Helper function to evaluate and dump results on screen."""
    args = get_args()
    writer = get_tensorboard_writer()

    total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose)
    string = ' validation loss at {} | '.format(prefix)
    for key in total_loss_dict:
        string += '{} value: {:.6E} | '.format(key, total_loss_dict[key].item())
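        # Clamp the exponent at 20 so the reported perplexity cannot overflow.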
        ppl = math.exp(min(20, total_loss_dict[key].item()))
        string += '{} PPL: {:.6E} | '.format(key, ppl)
        if writer:
            writer.add_scalar('{} validation'.format(key),
                              total_loss_dict[key].item(),
                              iteration)
            writer.add_scalar('{} validation vs samples'.format(key),
                              total_loss_dict[key].item(),
                              args.consumed_train_samples)
            if args.log_validation_ppl_to_tensorboard:
                writer.add_scalar('{} validation ppl'.format(key), ppl,
                                  iteration)
                writer.add_scalar('{} validation ppl vs samples'.format(key),
                                  ppl, args.consumed_train_samples)

    length = len(string) + 1
    print_rank_last('-' * length)
    print_rank_last(string)
    print_rank_last('-' * length)


def cyclic_iter(iter):
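    """Cycle through an iterable indefinitely."""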
    while True:
        for x in iter:
            yield x

def build_train_valid_test_data_iterators(
        build_train_valid_test_datasets_provider):
    """XXX"""
    args = get_args()

    (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)

    print_rank_0('> building train, validation, and test datasets ...')

    # Backward compatibility, assume fixed batch size.
    if args.iteration > 0 and args.consumed_train_samples == 0:
        assert args.train_samples is None, \
            'only backward compatibility support for iteration-based training'
        args.consumed_train_samples = args.iteration * args.global_batch_size
    if args.iteration > 0 and args.consumed_valid_samples == 0:
        if args.train_samples is None:
            args.consumed_valid_samples = (args.iteration // args.eval_interval) * \
                args.eval_iters * args.global_batch_size

    # Data loader only on rank 0 of each model parallel group.
    if mpu.get_tensor_model_parallel_rank() == 0:

        # Number of train/valid/test samples.
        if args.train_samples:
            train_samples = args.train_samples
        else:
            train_samples = args.train_iters * args.global_batch_size
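        # One evaluation every eval_interval iterations, plus a final one.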
        eval_iters = (args.train_iters // args.eval_interval + 1) * \
                     args.eval_iters
        test_iters = args.eval_iters
        train_val_test_num_samples = [train_samples,
                                      eval_iters * args.global_batch_size,
                                      test_iters * args.global_batch_size]
        print_rank_0(' > datasets target sizes (minimum size):')
        print_rank_0('    train:      {}'.format(train_val_test_num_samples[0]))
        print_rank_0('    validation: {}'.format(train_val_test_num_samples[1]))
        print_rank_0('    test:       {}'.format(train_val_test_num_samples[2]))

        # Build the datasets.
        train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider(
            train_val_test_num_samples)

        # Build dataloaders.
        train_dataloader = build_pretraining_data_loader(
            train_ds, args.consumed_train_samples)
        valid_dataloader = build_pretraining_data_loader(
            valid_ds, args.consumed_valid_samples)
        test_dataloader = build_pretraining_data_loader(test_ds, 0)

        # Flags to know if we need to do training/validation/testing.
        do_train = train_dataloader is not None and args.train_iters > 0
        do_valid = valid_dataloader is not None and args.eval_iters > 0
        do_test = test_dataloader is not None and args.eval_iters > 0
        # Pack the flags into a tensor to broadcast across the group.
        flags = torch.cuda.LongTensor(
            [int(do_train), int(do_valid), int(do_test)])
    else:
        flags = torch.cuda.LongTensor([0, 0, 0])

    # Broadcast the flags from tensor-model-parallel rank 0.
    torch.distributed.broadcast(flags,
                                mpu.get_tensor_model_parallel_src_rank(),
                                group=mpu.get_tensor_model_parallel_group())
    args.do_train = flags[0].item()
    args.do_valid = flags[1].item()
    args.do_test = flags[2].item()

    # Build iterators.
    dl_type = args.dataloader_type
    assert dl_type in ['single', 'cyclic']

    if train_dataloader is not None:
        train_data_iterator = iter(train_dataloader) if dl_type == 'single' \
                              else iter(cyclic_iter(train_dataloader))
    else:
        train_data_iterator = None

    if valid_dataloader is not None:
        valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \
                              else iter(cyclic_iter(valid_dataloader))
    else:
        valid_data_iterator = None

    if test_dataloader is not None:
        test_data_iterator = iter(test_dataloader) if dl_type == 'single' \
                             else iter(cyclic_iter(test_dataloader))
    else:
        test_data_iterator = None

    return train_data_iterator, valid_data_iterator, test_data_iterator