# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pretrain utilities."""

from datetime import datetime
import math
import sys
import time
# The earliest we can measure the start time.
_TRAIN_START_TIME = time.time()

import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from apex.optimizers import FusedAdam as Adam

from megatron import get_args
from megatron import get_timers
from megatron import get_tensorboard_writer
from megatron import get_current_global_batch_size
from megatron import get_num_microbatches
from megatron import is_last_rank
from megatron import update_num_microbatches
from megatron import mpu
from megatron import print_rank_0
from megatron import print_rank_last
from megatron.checkpointing import load_checkpoint
from megatron.checkpointing import save_checkpoint
from megatron.fp16 import FP16_Module
from megatron.fp16 import FP16_Optimizer
from megatron.initialize import initialize_megatron
from megatron.initialize import write_args_to_tensorboard
from megatron.learning_rates import AnnealingLR
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.model import get_params_for_weight_decay_optimization
from megatron.model.realm_model import ICTBertModel
from megatron.utils import check_adlr_autoresume_termination
from megatron.data.data_loaders import build_pretraining_data_loader
from megatron.utils import report_memory, params_grad_norm, params_global_norm, print_model, print_grads


def print_datetime(string):
    """Note that this call will sync across all ranks."""
    torch.distributed.barrier()
    time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0('[' + string + '] datetime: {} '.format(time_str))


def pretrain(train_valid_test_dataset_provider, model_provider,
             forward_step_func, extra_args_provider=None, args_defaults={}):
    """Main training program.

    This function will run the following in the order provided:
        1) initialize Megatron.
        2) setup model, optimizer and lr schedule using the model_provider.
        3) call train_val_test_data_provider to get train/val/test datasets.
        4) train the model using the forward_step_func.

    Arguments:
        train_valid_test_dataset_provider: a function that takes the size of
            train/valid/test dataset and returns `train, valid, test` datasets.
        model_provider: a function that returns a vanilla version of the
            model. By vanilla we mean a simple model on cpu with no fp16 or ddp.
        forward_step_func: a function that takes a `data iterator` and `model`,
            and returns a `loss` scalar together with a dictionary whose
            key:value pairs are the quantities we would like to monitor
            during training, for example `lm-loss: value`. We also require
            that this function adds `batch generator` to the timers class.
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
        args_defaults: a dictionary from argument-name to argument-value. It
            is used to set arguments that have already been parsed.
    """

    # Initialize and get arguments, timers, and Tensorboard writer.
    initialize_megatron(extra_args_provider=extra_args_provider,
                        args_defaults=args_defaults)

    # Adjust the startup time so it reflects the largest value.
    # This will be closer to what scheduler will see (outside of
    # image ... launches.
    global _TRAIN_START_TIME
    start_time_tensor = torch.cuda.FloatTensor([_TRAIN_START_TIME])
    torch.distributed.all_reduce(start_time_tensor,
                                 op=torch.distributed.ReduceOp.MIN)
    _TRAIN_START_TIME = start_time_tensor.item()
    print_rank_0('time to initialize megatron (seconds): {:.3f}'.format(
        time.time() - _TRAIN_START_TIME))
    print_datetime('after megatron is initialized')

    args = get_args()
    timers = get_timers()

    # Model, optimizer, and learning rate.
    timers('model and optimizer').start()
    model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider)
    timers('model and optimizer').stop()
    print_datetime('after model, optimizer, and learning rate '
                   'scheduler are built')

    # Data stuff.
    timers('train/valid/test data iterators').start()
    train_data_iterator, valid_data_iterator, test_data_iterator \
        = build_train_valid_test_data_iterators(
            train_valid_test_dataset_provider)
    timers('train/valid/test data iterators').stop()
    print_datetime('after dataloaders are built')

    # Print setup timing.
    print_rank_0('done with setups ...')
    timers.log(['model and optimizer', 'train/valid/test data iterators'])
    print_rank_0('training ...')

    iteration = 0
    if args.do_train and args.train_iters > 0:
        iteration = train(forward_step_func,
                          model, optimizer, lr_scheduler,
                          train_data_iterator, valid_data_iterator)
    print_datetime('after training is done')

    if args.do_valid:
        prefix = 'the end of training for val data'
        evaluate_and_print_results(prefix, forward_step_func,
                                   valid_data_iterator, model,
                                   iteration, False)

    if args.save and iteration != 0:
        save_checkpoint(iteration, model, optimizer, lr_scheduler)

    if args.do_test:
        # Run on test data.
        prefix = 'the end of training for test data'
        evaluate_and_print_results(prefix, forward_step_func,
                                   test_data_iterator, model,
                                   0, True)
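# A minimal usage sketch (hypothetical provider names; the real providers live
# in the top-level pretrain_*.py entry-point scripts), showing how an entry
# point is expected to wire its providers into pretrain():
#
#     from megatron.training import pretrain
#
#     def train_valid_test_datasets_provider(train_val_test_num_samples):
#         ...  # build and return (train_ds, valid_ds, test_ds)
#
#     def model_provider():
#         ...  # return the bare model on CPU (no fp16/DDP wrappers)
#
#     def forward_step(data_iterator, model, input_tensor):
#         ...  # return loss (and, on the last stage, a dict of monitored values)
#
#     pretrain(train_valid_test_datasets_provider, model_provider, forward_step)

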
def update_train_iters(args):

    # For iteration-based training, we don't need to do anything
    if args.train_iters:
        return

    # Constant batch size with sample-based training.
    if args.rampup_batch_size is None:
        args.train_iters = args.train_samples // args.global_batch_size

    else:
        # Sample based training with rampup batch size.
        iterations = 0
        consumed_samples = 0
        # Rampup phase.
        while consumed_samples <= int(args.rampup_batch_size[2]):
            update_num_microbatches(consumed_samples, consistency_check=False)
            consumed_samples += get_current_global_batch_size()
            iterations += 1
        # Reset
        update_num_microbatches(0, consistency_check=False)
        # Constant phase
        # Note that we throw away any partial last batch.
        iterations += (args.train_samples - consumed_samples) // \
                      args.global_batch_size
        args.train_iters = iterations

    print_rank_0('setting training iterations to {}'.format(args.train_iters))
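
# Worked example (hypothetical sizes): with train_samples=1000 and
# global_batch_size=32 and no rampup, train_iters = 1000 // 32 = 31 and the
# trailing partial batch of 8 samples is discarded. With a rampup schedule,
# the loop above instead replays the batch-size schedule to count how many
# iterations the rampup phase consumes before the constant phase is entered.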



def get_model(model_provider_func):
    """Build the model."""
    args = get_args()

    # Build model on cpu.
    model = model_provider_func()

    # Print number of parameters.
    if mpu.get_data_parallel_rank() == 0:
        print(' > number of parameters on (tensor, pipeline) '
              'model parallel rank ({}, {}): {}'.format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank(),
            sum([p.nelement() for p in model.parameters()])), flush=True)

    # GPU allocation.
    model.cuda(torch.cuda.current_device())

    # Fp16 conversion.
    if args.fp16:
        model = FP16_Module(model)

    if args.DDP_impl == 'torch':
        i = torch.cuda.current_device()
        model = torchDDP(model, device_ids=[i], output_device=i,
                         process_group=mpu.get_data_parallel_group())
        return model
    if args.DDP_impl == 'local':
        model = LocalDDP(model)
        return model

    raise NotImplementedError('Unknown DDP implementation specified: {}. '
                              'Exiting.'.format(args.DDP_impl))


def get_optimizer(model):
    """Set up the optimizer."""
    args = get_args()

    # Build parameter groups (weight decay and non-decay).
    while isinstance(model, (torchDDP, LocalDDP, FP16_Module)):
        model = model.module
    param_groups = get_params_for_weight_decay_optimization(model)

    # Add model parallel attribute if it is not set.
    for param_group in param_groups:
        for param in param_group['params']:
            if not hasattr(param, 'tensor_model_parallel'):
                param.tensor_model_parallel = False

    # Use Adam.
    optimizer = Adam(param_groups, lr=args.lr, weight_decay=args.weight_decay,
        betas=(args.adam_beta1, args.adam_beta2), eps=args.adam_eps)

    # Wrap into fp16 optimizer.
    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale,
                                   dynamic_loss_args={
                                       'scale_window': args.loss_scale_window,
                                       'min_scale': args.min_scale,
                                       'delayed_shift': args.hysteresis})

    return optimizer
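
# Rough sketch of apex's dynamic loss scaling as configured above (see apex
# for the authoritative behavior): the scale is raised after `scale_window`
# overflow-free steps and lowered on overflow, with `delayed_shift`
# (hysteresis) overflows tolerated before backing off and `min_scale` as the
# floor; train_step() below counts overflow steps as skipped iterations.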


def get_learning_rate_scheduler(optimizer):
    """Build the learning rate scheduler."""
    args = get_args()

    # Iteration-based training.
    if args.train_iters:
        if args.lr_decay_iters is None:
            args.lr_decay_iters = args.train_iters
        decay_steps = args.lr_decay_iters * args.global_batch_size
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_iters * args.global_batch_size
    # Sample-based training.
    elif args.train_samples:
        # We need to set training iters for later use. Technically
        # we need to adjust the training samples too (due to last
        # batch being incomplete) but we leave it as is for now.
        update_train_iters(args)
        if args.lr_decay_samples is None:
            args.lr_decay_samples = args.train_samples
        decay_steps = args.lr_decay_samples
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_samples
    else:
        raise Exception(
            'either train-iters or train-samples should be provided.')

    lr_scheduler = AnnealingLR(
        optimizer,
        max_lr=args.lr,
        min_lr=args.min_lr,
        warmup_steps=warmup_steps,
        decay_steps=decay_steps,
        decay_style=args.lr_decay_style,
        use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler,
        override_lr_scheduler=args.override_lr_scheduler)

    return lr_scheduler
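
# Worked example (hypothetical sizes): both branches express warmup/decay in
# units of samples, so AnnealingLR sees a consistent scale. With
# train_iters=100000, global_batch_size=256 and lr_warmup_fraction=0.01,
# decay_steps = 100000 * 256 = 25,600,000 samples and
# warmup_steps = 0.01 * 25,600,000 = 256,000 samples.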


def setup_model_and_optimizer(model_provider_func):
    """Setup model and optimizer."""
    args = get_args()

    model = get_model(model_provider_func)
    optimizer = get_optimizer(model)
    lr_scheduler = get_learning_rate_scheduler(optimizer)

    if args.load is not None:
        timers = get_timers()
        # Extra barrier is added to make sure all ranks report the
        # max time.
        torch.distributed.barrier()
        timers('load checkpoint').start()
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler)
        torch.distributed.barrier()
        timers('load checkpoint').stop()
        timers.log(['load checkpoint'])
    else:
        args.iteration = 0

    # We only support local DDP with multiple micro-batches.
    if get_num_microbatches() > 1:
        assert args.DDP_impl == 'local'

    # get model without FP16 and/or TorchDDP wrappers
    unwrapped_model = model
    while hasattr(unwrapped_model, 'module'):
        unwrapped_model = unwrapped_model.module

    if args.iteration == 0 and hasattr(unwrapped_model,
                                       'init_state_dict_from_bert'):
        print("Initializing ICT from pretrained BERT model", flush=True)
        unwrapped_model.init_state_dict_from_bert()

    return model, optimizer, lr_scheduler


def communicate(tensor_send_next, tensor_send_prev, recv_forward, recv_backward):
    """Communicate tensors between stages."""
    args = get_args()

    # Create placeholder tensors for receive in forward and backward directions
    # if needed.
    tensor_recv_prev = None
    tensor_recv_next = None
    tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size)
    dtype = args.params_dtype
    if args.fp32_residual_connection:
        dtype = torch.float
    if recv_forward:
        tensor_recv_prev = torch.empty(tensor_shape,
                                       requires_grad=True,
                                       device=torch.cuda.current_device(),
                                       dtype=dtype)
    if recv_backward:
        tensor_recv_next = torch.empty(tensor_shape,
                                       requires_grad=True,
                                       device=torch.cuda.current_device(),
                                       dtype=dtype)

    # Send tensors in both the forward and backward directions as appropriate.
    ops = []
    if tensor_send_prev is not None:
        send_prev_op = torch.distributed.P2POp(torch.distributed.isend, tensor_send_prev,
                                               mpu.get_pipeline_model_parallel_prev_rank())
        ops.append(send_prev_op)
    if tensor_recv_prev is not None:
        recv_prev_op = torch.distributed.P2POp(torch.distributed.irecv, tensor_recv_prev,
                                               mpu.get_pipeline_model_parallel_prev_rank())
        ops.append(recv_prev_op)
    if tensor_send_next is not None:
        send_next_op = torch.distributed.P2POp(torch.distributed.isend, tensor_send_next,
                                               mpu.get_pipeline_model_parallel_next_rank())
        ops.append(send_next_op)
    if tensor_recv_next is not None:
        recv_next_op = torch.distributed.P2POp(torch.distributed.irecv, tensor_recv_next,
                                               mpu.get_pipeline_model_parallel_next_rank())
        ops.append(recv_next_op)
    reqs = torch.distributed.batch_isend_irecv(ops)
    for req in reqs:
        req.wait()

    return tensor_recv_prev, tensor_recv_next
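
# Example pairing from the 1F1B steady state below: a middle stage sends its
# activation downstream and, in the same batched call, receives the gradient
# for an earlier microbatch:
#     _, output_tensor_grad = communicate(tensor_send_next=output_tensor,
#                                         tensor_send_prev=None,
#                                         recv_forward=False,
#                                         recv_backward=True)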


def backward_step(optimizer, model, input_tensor, output_tensor, output_tensor_grad):
    """Backward step."""
    args = get_args()
    timers = get_timers()

    # Retain the grad on the input_tensor.
    if input_tensor is not None:
        input_tensor.retain_grad()

    # Backward pass.
    if args.fp16:
        optimizer.backward(output_tensor, update_master_grads=False,
                           output_tensor_grad=output_tensor_grad)
    else:
        torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)

    # Collect the grad of the input_tensor.
    input_tensor_grad = None
    if input_tensor is not None:
        input_tensor_grad = input_tensor.grad

    return input_tensor_grad


def forward_step_with_communication(forward_step_func, data_iterator, model,
                                    input_tensors, output_tensors,
                                    losses_reduced, timers):
    """Run a warmup forward pass, receiving and sending activations
    between pipeline stages as needed."""
    args = get_args()

    if not mpu.is_pipeline_first_stage():
        timers('forward-recv').start()
        input_tensor, _ = communicate(
            tensor_send_next=None,
            tensor_send_prev=None,
            recv_forward=True,
            recv_backward=False)
        timers('forward-recv').stop()
    else:
        input_tensor = None

    # Forward model for one step.
    timers('forward-compute').start()
    output_tensor = forward_step_func(data_iterator, model, input_tensor)
    timers('forward-compute').stop()

    if mpu.is_pipeline_last_stage():
        loss, loss_reduced = output_tensor
        output_tensor = loss / get_num_microbatches()
        losses_reduced.append(loss_reduced)
    else:
        timers('forward-send').start()
        communicate(
            tensor_send_next=output_tensor,
            tensor_send_prev=None,
            recv_forward=False,
            recv_backward=False)
        timers('forward-send').stop()

    input_tensors.append(input_tensor)
    output_tensors.append(output_tensor)


def backward_step_with_communication(optimizer, model, input_tensors, output_tensors, timers):
    """Run a cooldown backward pass, receiving and sending gradients
    between pipeline stages as needed."""
    input_tensor = input_tensors.pop(0)
    output_tensor = output_tensors.pop(0)

    if mpu.is_pipeline_last_stage():
        output_tensor_grad = None
    else:
        timers('backward-recv').start()
        _, output_tensor_grad = communicate(
            tensor_send_next=None,
            tensor_send_prev=None,
            recv_forward=False,
            recv_backward=True)
        timers('backward-recv').stop()

    # Backward pass for one step.
    timers('backward-compute').start()
    input_grad_tensor = \
        backward_step(optimizer, model, input_tensor, output_tensor, output_tensor_grad)
    timers('backward-compute').stop()

    if not mpu.is_pipeline_first_stage():
        timers('backward-send').start()
        communicate(
            tensor_send_next=None,
            tensor_send_prev=input_grad_tensor,
            recv_forward=False,
            recv_backward=False)
        timers('backward-send').stop()


def forward_and_backward_steps_with_communication(forward_step_func, data_iterator, model,
                                                  optimizer,
                                                  input_tensor, last_microbatch,
                                                  input_tensors, output_tensors,
                                                  losses_reduced, timers):
    """Run one steady-state 1F1B step: a forward pass followed by a
    backward pass, overlapping sends with the receives for the next step."""
    args = get_args()

    # Forward model for one step.
    timers('forward-compute').start()
    output_tensor = forward_step_func(data_iterator, model, input_tensor)
    timers('forward-compute').stop()

    if mpu.is_pipeline_last_stage():
        loss, loss_reduced = output_tensor
        output_tensor = loss / get_num_microbatches()
        output_tensor_grad = None
        losses_reduced.append(loss_reduced)
    else:
        timers('forward-send-backward-recv').start()
        _, output_tensor_grad = communicate(
            tensor_send_next=output_tensor,
            tensor_send_prev=None,
            recv_forward=False,
            recv_backward=True)
        timers('forward-send-backward-recv').stop()

    input_tensors.append(input_tensor)
    output_tensors.append(output_tensor)

    input_tensor = input_tensors.pop(0)
    output_tensor = output_tensors.pop(0)

    # Backward pass for one step.
    timers('backward-compute').start()
    input_grad_tensor = \
        backward_step(optimizer, model, input_tensor, output_tensor, output_tensor_grad)
    timers('backward-compute').stop()

    if not mpu.is_pipeline_first_stage():
        timers('backward-send-forward-recv').start()
        input_tensor, _ = communicate(
            tensor_send_next=None,
            tensor_send_prev=input_grad_tensor,
            recv_forward=(not last_microbatch),
            recv_backward=False)
        timers('backward-send-forward-recv').stop()
    else:
        input_tensor = None

    return input_tensor


def forward_backward_no_pipelining(forward_step_func, data_iterator, model,
                                   optimizer, timers):
    """Run forward and backward passes without inter-stage communication."""
    args = get_args()

    losses_reduced = []
    for i in range(get_num_microbatches()):
        timers('forward-compute').start()
        loss, loss_reduced = forward_step_func(data_iterator, model, input_tensor=None)
        output_tensor = loss / get_num_microbatches()
        losses_reduced.append(loss_reduced)
        timers('forward-compute').stop()

        timers('backward-compute').start()
        backward_step(optimizer, model, input_tensor=None,
                      output_tensor=output_tensor, output_tensor_grad=None)
        timers('backward-compute').stop()

    return losses_reduced


def forward_backward_pipelining(forward_step_func, data_iterator, model,
                                optimizer, timers):
    """Run 1F1B schedule, with communication and warmup + cooldown microbatches as needed."""
    args = get_args()

    # Compute number of warmup microbatches.
    num_microbatches = get_num_microbatches()
    num_warmup_microbatches = \
        (mpu.get_pipeline_model_parallel_world_size() -
         mpu.get_pipeline_model_parallel_rank() - 1)
    num_warmup_microbatches = min(
        num_warmup_microbatches,
        num_microbatches)
    num_microbatches_remaining = \
        num_microbatches - num_warmup_microbatches

    input_tensors = []
    output_tensors = []
    losses_reduced = []

    # Run warmup forward passes.
    for i in range(num_warmup_microbatches):
        forward_step_with_communication(
            forward_step_func, data_iterator, model,
            input_tensors, output_tensors,
            losses_reduced, timers)

    # Before running 1F1B, need to receive first forward tensor.
    # If all microbatches are run in warmup / cooldown phase, then no need to
    # receive this tensor here.
    if num_microbatches_remaining > 0:
        if mpu.is_pipeline_first_stage():
            input_tensor = None
        else:
            timers('forward-recv').start()
            input_tensor, _ = communicate(tensor_send_next=None,
                                          tensor_send_prev=None,
                                          recv_forward=True,
                                          recv_backward=False)
            timers('forward-recv').stop()

    # Run 1F1B.
    for i in range(num_microbatches_remaining):
        last_iteration = (i == (num_microbatches_remaining - 1))
        input_tensor = \
            forward_and_backward_steps_with_communication(forward_step_func, data_iterator, model,
                                                          optimizer,
                                                          input_tensor, last_iteration,
                                                          input_tensors, output_tensors,
                                                          losses_reduced, timers)

    # Run cooldown backward passes.
    for i in range(num_warmup_microbatches):
        backward_step_with_communication(
            optimizer, model, input_tensors, output_tensors, timers)

    return losses_reduced
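
# Worked example: with a pipeline of 4 stages and 8 microbatches, stage 0
# runs 3 warmup forward passes (4 - 0 - 1), stage 3 runs none, and each stage
# then alternates one forward with one backward for its remaining microbatches
# before draining its queues in the cooldown loop.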


def train_step(forward_step_func, data_iterator,
               model, optimizer, lr_scheduler):
    """Single training step."""
    args = get_args()
    timers = get_timers()

    # Set grad to zero.
    if args.fp16:
        optimizer.zero_grad(set_grads_to_None=True)
    else:
        optimizer.zero_grad()

    if mpu.get_pipeline_model_parallel_world_size() > 1:
        losses_reduced = forward_backward_pipelining(
            forward_step_func, data_iterator, model, optimizer, timers)
    else:
        losses_reduced = forward_backward_no_pipelining(
            forward_step_func, data_iterator, model, optimizer, timers)

    # All-reduce if needed.
    if args.DDP_impl == 'local':
        timers('backward-params-all-reduce').start()
        model.allreduce_params(reduce_after=False,
                               fp32_allreduce=args.fp32_allreduce)
        timers('backward-params-all-reduce').stop()

    # All-reduce word_embeddings' grad across first and last stages to ensure
    # that word_embeddings parameters stay in sync.
    # This should only run for models that support pipelined model parallelism
    # (BERT and GPT-2).
    timers('backward-embedding-all-reduce').start()
    if (mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage()) and \
            mpu.get_pipeline_model_parallel_world_size() > 1:
        unwrapped_model = model
        while isinstance(unwrapped_model, (torchDDP, LocalDDP, FP16_Module)):
            unwrapped_model = unwrapped_model.module

        if unwrapped_model.share_word_embeddings:
            word_embeddings_weight = unwrapped_model.word_embeddings_weight()
            torch.distributed.all_reduce(word_embeddings_weight.grad,
                                         group=mpu.get_embedding_group())
    timers('backward-embedding-all-reduce').stop()

    # Update master gradients.
    timers('backward-master-grad').start()
    if args.fp16:
        optimizer.update_master_grads()
    timers('backward-master-grad').stop()

    # Clipping gradients helps prevent exploding gradients.
    timers('backward-clip-grad').start()
    if args.clip_grad > 0.:
        if not args.fp16:
            parameters = []
            parameter_names = []
            for parameter_name, parameter in model.named_parameters():
                parameters.append(parameter)
                parameter_names.append(parameter_name)
            mpu.clip_grad_norm(parameters, args.clip_grad,
                               parameter_names=parameter_names)
        else:
            optimizer.clip_master_grads(args.clip_grad)
    timers('backward-clip-grad').stop()

    # Debug-only norm reporting; keep commented out to avoid per-step spam.
    #print_rank_0("after backward")
    #print_grads(model)
    #print_model(model)
    #print_rank_0(params_global_norm(model))
    #print_rank_0(params_grad_norm(model))


    # Update parameters.
    timers('optimizer').start()
    optimizer.step()
    timers('optimizer').stop()

    # Debug-only norm reporting; keep commented out to avoid per-step spam.
    #print_rank_0("after optimizer")
    #print_model(model)
    #print_rank_0(params_global_norm(model))
    #print_rank_0(params_grad_norm(model))
    #sys.exit()

    # Update learning rate.
    skipped_iter = 0
    if not (args.fp16 and optimizer.overflow):
        increment = get_num_microbatches() * \
                    args.micro_batch_size * \
                    args.data_parallel_size
        lr_scheduler.step(increment=increment)
    else:
        skipped_iter = 1

    if mpu.is_pipeline_last_stage():
        # Average loss across microbatches.
        loss_reduced = {}
        for key in losses_reduced[0]:
            losses_reduced_for_key = [x[key] for x in losses_reduced]
            loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
        return loss_reduced, skipped_iter
    return {}, skipped_iter
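
# Note: the lr_scheduler increment above equals the global batch size in
# samples (num_microbatches * micro_batch_size * data_parallel_size),
# matching the sample-based units used by get_learning_rate_scheduler().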


def training_log(loss_dict, total_loss_dict, learning_rate, iteration,
                 loss_scale, report_memory_flag, skipped_iter):
    """Log training information such as losses, timing, etc."""
    args = get_args()
    timers = get_timers()
    writer = get_tensorboard_writer()

    # Advanced, skipped, and NaN iterations.
    advanced_iters_key = 'advanced iterations'
    skipped_iters_key = 'skipped iterations'
    nan_iters_key = 'nan iterations'
    # Advanced iterations.
    if not skipped_iter:
        total_loss_dict[advanced_iters_key] = total_loss_dict.get(
            advanced_iters_key, 0) + 1
    else:
        if advanced_iters_key not in total_loss_dict:
            total_loss_dict[advanced_iters_key] = 0
    # Skipped iterations.
    total_loss_dict[skipped_iters_key] = total_loss_dict.get(
        skipped_iters_key, 0) + skipped_iter
    # Update losses and set NaN iterations.
    got_nan = False
    for key in loss_dict:
        if not skipped_iter:
            total_loss_dict[key] = total_loss_dict.get(
                key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]
        else:
            value = loss_dict[key].float().sum().item()
            is_nan = value == float('inf') or \
                     value == -float('inf') or \
                     value != value
            got_nan = got_nan or is_nan
    total_loss_dict[nan_iters_key] = total_loss_dict.get(
        nan_iters_key, 0) + int(got_nan)

    # Logging.
    timers_to_log = []

    def add_to_logging(name):
        if name in timers.timers:
            timers_to_log.append(name)
    add_to_logging('forward-compute')
    add_to_logging('forward-recv')
    add_to_logging('forward-send')
    add_to_logging('forward-send-backward-recv')
    add_to_logging('backward-compute')
    add_to_logging('backward-recv')
    add_to_logging('backward-send')
    add_to_logging('backward-send-forward-recv')
    add_to_logging('backward-master-grad')
    add_to_logging('backward-params-all-reduce')
    add_to_logging('backward-embedding-all-reduce')
    add_to_logging('backward-clip-grad')
    add_to_logging('optimizer')
    add_to_logging('batch-generator')

    # Calculate batch size.
    batch_size = args.micro_batch_size * args.data_parallel_size * \
        get_num_microbatches()

    total_iterations = total_loss_dict[advanced_iters_key] + \
                       total_loss_dict[skipped_iters_key]

    # Tensorboard values.
    if writer and is_last_rank():
        writer.add_scalar('learning-rate', learning_rate, iteration)
        writer.add_scalar('learning-rate vs samples', learning_rate,
                          args.consumed_train_samples)
        writer.add_scalar('batch-size', batch_size, iteration)
        writer.add_scalar('batch-size vs samples', batch_size,
                          args.consumed_train_samples)
        for key in loss_dict:
            writer.add_scalar(key, loss_dict[key], iteration)
            writer.add_scalar(key + ' vs samples', loss_dict[key],
                              args.consumed_train_samples)
        if args.fp16:
            writer.add_scalar('loss-scale', loss_scale, iteration)
            writer.add_scalar('loss-scale vs samples', loss_scale,
                              args.consumed_train_samples)
        timers.write(timers_to_log, writer, iteration,
                     normalizer=total_iterations)

    if iteration % args.log_interval == 0:
        elapsed_time = timers('interval time').elapsed()
        elapsed_time_per_iteration = elapsed_time / total_iterations
        if writer and torch.distributed.get_rank() == 0:
            writer.add_scalar('iteration-time',
                              elapsed_time_per_iteration, iteration)
        log_string = ' iteration {:8d}/{:8d} |'.format(
            iteration, args.train_iters)
        log_string += ' consumed samples: {:12d} |'.format(
            args.consumed_train_samples)
        log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(
            elapsed_time_per_iteration * 1000.0)
        log_string += ' learning rate: {:.3E} |'.format(learning_rate)
        log_string += ' global batch size: {:5d} |'.format(batch_size)
        for key in total_loss_dict:
            if key not in [advanced_iters_key, skipped_iters_key,
                           nan_iters_key]:
                avg = total_loss_dict[key].item() / \
                      float(max(1, total_loss_dict[advanced_iters_key]))
                if avg > 0.0:
                    log_string += ' {}: {:.6E} |'.format(key, avg)
                total_loss_dict[key] = torch.cuda.FloatTensor([0.0])
        if args.fp16:
            log_string += ' loss scale: {:.1f} |'.format(loss_scale)
        log_string += ' number of skipped iterations: {:3d} |'.format(
            total_loss_dict[skipped_iters_key])
        log_string += ' number of nan iterations: {:3d} |'.format(
            total_loss_dict[nan_iters_key])
        total_loss_dict[advanced_iters_key] = 0
        total_loss_dict[skipped_iters_key] = 0
        total_loss_dict[nan_iters_key] = 0
        print_rank_last(log_string)
        if report_memory_flag and learning_rate > 0.:
            # Report memory after optimizer state has been initialized.
            report_memory('(after {} iterations)'.format(iteration))
            report_memory_flag = False
        timers.log(timers_to_log, normalizer=args.log_interval)

    return report_memory_flag


def save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler):
    timers = get_timers()
    # Extra barrier is added to make sure
    # all ranks report the max time.
    torch.distributed.barrier()
    timers('save checkpoint').start()
    save_checkpoint(iteration, model, optimizer, lr_scheduler)
    torch.distributed.barrier()
    timers('save checkpoint').stop()
    timers.log(['save checkpoint'])


def train(forward_step_func, model, optimizer, lr_scheduler,
          train_data_iterator, valid_data_iterator):
    """Train the model."""
    args = get_args()
    timers = get_timers()
    # Write args to tensorboard
    write_args_to_tensorboard()

    # Turn on training mode which enables dropout.
    model.train()

    # Tracking loss.
    total_loss_dict = {}

    # Iterations.
    iteration = args.iteration

    timers('interval time').start()
    print_datetime('before the start of training step')
    report_memory_flag = True
    while iteration < args.train_iters:
        update_num_microbatches(args.consumed_train_samples)
        loss_dict, skipped_iter = train_step(forward_step_func,
                                             train_data_iterator,
                                             model,
                                             optimizer,
                                             lr_scheduler)
        iteration += 1
        args.consumed_train_samples += mpu.get_data_parallel_world_size() * \
                                       args.micro_batch_size * \
                                       get_num_microbatches()

        # Logging.
        loss_scale = None
        if args.fp16:
            loss_scale = optimizer.loss_scale
        report_memory_flag = training_log(loss_dict, total_loss_dict,
                                          optimizer.param_groups[0]['lr'],
                                          iteration, loss_scale,
                                          report_memory_flag, skipped_iter)

        # Autoresume
        if args.adlr_autoresume and \
           (iteration % args.adlr_autoresume_interval == 0):
            check_adlr_autoresume_termination(iteration, model, optimizer,
                                              lr_scheduler)

        # Evaluation
        if args.eval_interval and iteration % args.eval_interval == 0 and \
           args.do_valid:
            prefix = 'iteration {}'.format(iteration)
            evaluate_and_print_results(prefix, forward_step_func,
                                       valid_data_iterator, model,
                                       iteration, False)

        # Checkpointing
        saved_checkpoint = False
        if args.save and args.save_interval and \
           iteration % args.save_interval == 0:
            save_checkpoint_and_time(iteration, model, optimizer,
                                     lr_scheduler)
            saved_checkpoint = True

        # Exiting based on duration
        if args.exit_duration_in_mins:
            train_time = (time.time() - _TRAIN_START_TIME) / 60.0
            done_cuda = torch.cuda.IntTensor(
                [train_time > args.exit_duration_in_mins])
            torch.distributed.all_reduce(
                done_cuda, op=torch.distributed.ReduceOp.MAX)
            done = done_cuda.item()
            if done:
                if not saved_checkpoint:
                    save_checkpoint_and_time(iteration, model, optimizer,
                                             lr_scheduler)
                print_datetime('exiting program after {} minutes'.format(train_time))
                sys.exit()

        # Exiting based on iterations
        if args.exit_interval and iteration % args.exit_interval == 0:
            #if not saved_checkpoint:
            #    save_checkpoint_and_time(iteration, model, optimizer,
            #                             lr_scheduler)
            torch.distributed.barrier()
            print_datetime('exiting program at iteration {}'.format(iteration))
            sys.exit()

    return iteration


def evaluate(forward_step_func, data_iterator, model, verbose=False):
    """Evaluation."""
    args = get_args()

    # Turn on evaluation mode which disables dropout.
    model.eval()

    total_loss_dict = {}

    with torch.no_grad():
        iteration = 0
        while iteration < args.eval_iters:
            iteration += 1
            if verbose and iteration % args.log_interval == 0:
                print_rank_0('Evaluating iter {}/{}'.format(iteration,
                                                            args.eval_iters))

            for _ in range(get_num_microbatches()):
                if not mpu.is_pipeline_first_stage():
                    input_tensor, _ = communicate(
                        tensor_send_next=None,
                        tensor_send_prev=None,
                        recv_forward=True,
                        recv_backward=False)
                else:
                    input_tensor = None

                # Forward evaluation.
                output_tensor = forward_step_func(data_iterator, model, input_tensor)

                if mpu.is_pipeline_last_stage():
                    _, loss_dict = output_tensor
                    # Reduce across processes.
                    for key in loss_dict:
                        total_loss_dict[key] = total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + \
                            loss_dict[key]
                else:
                    communicate(
                        tensor_send_next=output_tensor,
                        tensor_send_prev=None,
                        recv_forward=False,
                        recv_backward=False)

            args.consumed_valid_samples += mpu.get_data_parallel_world_size() \
                                           * args.micro_batch_size \
                                           * get_num_microbatches()

    # Move model back to the train mode.
    model.train()

    for key in total_loss_dict:
        total_loss_dict[key] /= args.eval_iters * get_num_microbatches()
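    # Note: each loss entry was summed over eval_iters * get_num_microbatches()
    # forward passes on the last pipeline stage, so the division above yields
    # a per-microbatch average.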

    return total_loss_dict

def evaluate_and_print_results(prefix, forward_step_func,
                               data_iterator, model,
                               iteration, verbose=False):
    """Helper function to evaluate and dump results on screen."""
    writer = get_tensorboard_writer()

    total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose)
    string = ' validation loss at {} | '.format(prefix)
    for key in total_loss_dict:
        string += '{} value: {:.6E} | '.format(key, total_loss_dict[key].item())
        ppl = math.exp(min(20, total_loss_dict[key].item()))
        string += '{} PPL: {:.6E} | '.format(key, ppl)
        if writer and torch.distributed.get_rank() == 0:
            writer.add_scalar('{} value'.format(key),
                              total_loss_dict[key].item(),
                              iteration)
            writer.add_scalar('{} ppl'.format(key), ppl, iteration)

    length = len(string) + 1
    print_rank_last('-' * length)
    print_rank_last(string)
    print_rank_last('-' * length)


def build_train_valid_test_data_iterators(
        build_train_valid_test_datasets_provider):
    """Build pretraining train/valid/test data iterators."""
    args = get_args()

    (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)

    print_rank_0('> building train, validation, and test datasets ...')

    # Backward compatibility, assume fixed batch size.
    if args.iteration > 0 and args.consumed_train_samples == 0:
        assert args.train_samples is None, \
            'only backward compatibility support for iteration-based training'
        args.consumed_train_samples = args.iteration * args.global_batch_size
    if args.iteration > 0 and args.consumed_valid_samples == 0:
        assert args.train_samples is None, \
            'only backward compatibility support for iteration-based training'
        args.consumed_valid_samples = (args.iteration // args.eval_interval) * \
            args.eval_iters * args.global_batch_size

    # Data loader only on rank 0 of each model parallel group.
    if mpu.get_tensor_model_parallel_rank() == 0:

        # Number of train/valid/test samples.
        if args.train_samples:
            train_samples = args.train_samples
        else:
            train_samples = args.train_iters * args.global_batch_size
        eval_iters = (args.train_iters // args.eval_interval + 1) * \
                     args.eval_iters
        test_iters = args.eval_iters
        train_val_test_num_samples = [train_samples,
                                      eval_iters * args.global_batch_size,
                                      test_iters * args.global_batch_size]
        print_rank_0(' > datasets target sizes (minimum size):')
        print_rank_0('    train:      {}'.format(train_val_test_num_samples[0]))
        print_rank_0('    validation: {}'.format(train_val_test_num_samples[1]))
        print_rank_0('    test:       {}'.format(train_val_test_num_samples[2]))

        # Build the datasets.
        train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider(
            train_val_test_num_samples)

        # Build dataloaders.
        train_dataloader = build_pretraining_data_loader(
            train_ds, args.consumed_train_samples)
        valid_dataloader = build_pretraining_data_loader(
            valid_ds, args.consumed_valid_samples)
        test_dataloader = build_pretraining_data_loader(test_ds, 0)

        # Flags to know if we need to do training/validation/testing.
        do_train = train_dataloader is not None and args.train_iters > 0
        do_valid = valid_dataloader is not None and args.eval_iters > 0
        do_test = test_dataloader is not None and args.eval_iters > 0
        # Pack the flags into a tensor so they can be broadcast to all
        # tensor-model-parallel ranks.
        flags = torch.cuda.LongTensor(
            [int(do_train), int(do_valid), int(do_test)])
    else:
        flags = torch.cuda.LongTensor([0, 0, 0])

    # Broadcast the flags.
    torch.distributed.broadcast(flags,
                                mpu.get_tensor_model_parallel_src_rank(),
                                group=mpu.get_tensor_model_parallel_group())
    args.do_train = flags[0].item()
    args.do_valid = flags[1].item()
    args.do_test = flags[2].item()

    # Build iterators.
    if train_dataloader is not None:
        train_data_iterator = iter(train_dataloader)
    else:
        train_data_iterator = None

    if valid_dataloader is not None:
        valid_data_iterator = iter(valid_dataloader)
    else:
        valid_data_iterator = None

    if test_dataloader is not None:
        test_data_iterator = iter(test_dataloader)
    else:
        test_data_iterator = None

    return train_data_iterator, valid_data_iterator, test_data_iterator