# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron arguments."""

import argparse
import os

import torch
from megatron import fused_kernels

def parse_args(extra_args_provider=None, defaults={},
               ignore_unknown_args=False):
    """Parse all arguments."""
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
                                     allow_abbrev=False)

    # Standard arguments.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)
    parser = _add_realm_args(parser)
    parser = _add_vit_args(parser)

    # Custom arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)

    # Parse.
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()

    # Distributed args.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv('WORLD_SIZE', '1'))
    # Tensor model parallel size.
    args.tensor_model_parallel_size = min(
        args.tensor_model_parallel_size, args.world_size)
    assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
        ' ({}) is not divisible by tensor model parallel size ({})'.format(
            args.world_size, args.tensor_model_parallel_size)
    # Pipeline model parallel size.
    args.pipeline_model_parallel_size = min(
        args.pipeline_model_parallel_size,
        (args.world_size // args.tensor_model_parallel_size))
    # Checks.
    model_parallel_size = args.pipeline_model_parallel_size * \
                          args.tensor_model_parallel_size
    assert args.world_size % model_parallel_size == 0, 'world size ({}) is '\
        'not divisible by tensor parallel size ({}) times pipeline parallel '\
        'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
                           args.pipeline_model_parallel_size)
    args.data_parallel_size = args.world_size // model_parallel_size
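    # Example: with world size 16, tensor-model-parallel size 2, and
    # pipeline-model-parallel size 4, the data-parallel size is
    # 16 / (2 * 4) = 2.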
    if args.rank == 0:
        print('using world size: {}, data-parallel-size: {}, '
              'tensor-model-parallel size: {}, '
              'pipeline-model-parallel size: {} '.format(
                  args.world_size, args.data_parallel_size,
                  args.tensor_model_parallel_size,
                  args.pipeline_model_parallel_size), flush=True)

    # Deprecated arguments
    assert args.batch_size is None, '--batch-size argument is no longer ' \
        'valid, use --micro-batch-size instead'
    del args.batch_size
    assert args.warmup is None, '--warmup argument is no longer valid, use ' \
        '--lr-warmup-fraction instead'
    del args.warmup
    assert args.model_parallel_size is None, '--model-parallel-size is no ' \
        'longer valid, use --tensor-model-parallel-size instead'
    del args.model_parallel_size

    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        if getattr(args, key) is not None:
            if args.rank == 0:
                print('WARNING: overriding default argument for '
                      '{key}:{v} with {key}:{v2}'.format(
                          key=key, v=defaults[key],
                          v2=getattr(args, key)), flush=True)
        else:
            setattr(args, key, defaults[key])

    # Batch size.
    assert args.micro_batch_size is not None
    assert args.micro_batch_size > 0
    if args.global_batch_size is None:
        args.global_batch_size = args.micro_batch_size * args.data_parallel_size
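        # Example: micro batch size 8 with data-parallel size 4 yields a
        # default global batch size of 32, i.e. one micro-batch per step.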
        if args.rank == 0:
            print('setting global batch size to {}'.format(
                args.global_batch_size), flush=True)
    assert args.global_batch_size > 0

    # Parameters dtype.
    args.params_dtype = torch.float
    if args.fp16:
        args.params_dtype = torch.half
    if args.rank == 0:
        print('using {} for parameters ...'.format(args.params_dtype),
              flush=True)

    # Consumed samples.
    args.consumed_train_samples = 0
    args.consumed_valid_samples = 0

    # Iteration-based training.
    if args.train_iters:
        # If we use iteration-based training, make sure the
        # sample-based options are off.
        assert args.train_samples is None, \
            'expected iteration-based training'
        assert args.lr_decay_samples is None, \
            'expected iteration-based learning rate decay'
        assert args.lr_warmup_samples == 0, \
            'expected iteration-based learning rate warmup'
        assert args.rampup_batch_size is None, \
            'expected no batch-size rampup for iteration-based training'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_iters == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-iters'

    # Sample-based training.
    if args.train_samples:
        # If we use sample-based training, make sure the
        # iteration-based options are off.
        assert args.train_iters is None, \
            'expected sample-based training'
        assert args.lr_decay_iters is None, \
            'expected sample-based learning rate decay'
        assert args.lr_warmup_iters == 0, \
            'expected sample-based learning rate warmup'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_samples == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-samples'

    # Check required arguments.
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads',
                     'max_position_embeddings']
    for req_arg in required_args:
        _check_arg_is_not_none(args, req_arg)

    # Checks.
    if args.ffn_hidden_size is None:
        args.ffn_hidden_size = 4 * args.hidden_size

    if args.kv_channels is None:
        assert args.hidden_size % args.num_attention_heads == 0
        args.kv_channels = args.hidden_size // args.num_attention_heads

    if args.seq_length is not None:
        assert args.encoder_seq_length is None
        args.encoder_seq_length = args.seq_length
    else:
        assert args.encoder_seq_length is not None
        args.seq_length = args.encoder_seq_length
 
    assert args.hidden_size % args.num_attention_heads == 0
    if args.seq_length is not None:
        assert args.max_position_embeddings >= args.seq_length
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None
    # Mixed precision checks.
    if args.fp16_lm_cross_entropy:
        assert args.fp16, 'lm cross entropy in fp16 only supported in fp16 mode.'
    if args.fp32_residual_connection:
        assert args.fp16, \
            'residual connection in fp32 only supported when using fp16.'
    # Activation checkpointing.
    if args.distribute_checkpointed_activations:
        assert args.checkpoint_activations, \
            'for distribute-checkpointed-activations to work you '\
            'need to enable checkpoint-activations'
    # Load scaled_masked_softmax_fusion_kernels
    if args.masked_softmax_fusion:
        fused_kernels.load_scaled_upper_triang_masked_softmax_fusion_kernel()
        fused_kernels.load_scaled_masked_softmax_fusion_kernel()

    # Load mixed precision fused layer norm.
    if args.fp32_residual_connection:
        fused_kernels.load_fused_mix_prec_layer_norm_kernel()

    _print_args(args)
    return args
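

def _example_parse_args_usage():
    """Usage sketch only; illustrative and not called anywhere in Megatron.

    Shows how a training script could extend the parser and seed defaults
    via parse_args(). The extra flag and the default value below are
    assumptions made purely for illustration.
    """
    def extra_args(parser):
        group = parser.add_argument_group(title='example extensions')
        group.add_argument('--example-flag', action='store_true',
                           help='Hypothetical extra option.')
        return parser

    return parse_args(extra_args_provider=extra_args,
                      defaults={'tokenizer_type': 'GPT2BPETokenizer'},
                      ignore_unknown_args=True)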


def _print_args(args):
    """Print arguments."""
    if args.rank == 0:
        print('------------------------ arguments ------------------------',
              flush=True)
        str_list = []
        for arg in vars(args):
            dots = '.' * (48 - len(arg))
            str_list.append('  {} {} {}'.format(arg, dots, getattr(args, arg)))
        for arg in sorted(str_list, key=lambda x: x.lower()):
            print(arg, flush=True)
        print('-------------------- end of arguments ---------------------',
              flush=True)


def _check_arg_is_not_none(args, arg):
    assert getattr(args, arg) is not None, '{} argument is None'.format(arg)


def _add_network_size_args(parser):
    group = parser.add_argument_group(title='network size')

    group.add_argument('--num-layers', type=int, default=None,
                       help='Number of transformer layers.')
    group.add_argument('--hidden-size', type=int, default=None,
                       help='Transformer hidden size.')
    group.add_argument('--ffn-hidden-size', type=int, default=None,
                       help='Transformer Feed-Forward Network hidden size. '
                       'This is set to 4*hidden-size if not provided.')
    group.add_argument('--num-attention-heads', type=int, default=None,
                       help='Number of transformer attention heads.')
    group.add_argument('--kv-channels', type=int, default=None,
                       help='Projection weights dimension in multi-head '
                       'attention. This is set to '
                       'args.hidden_size // args.num_attention_heads '
                       'if not provided.')
    group.add_argument('--max-position-embeddings', type=int, default=None,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of the position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this '
                       'value. This is added for computational efficiency '
                       'reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use original BERT residual connection '
                       'ordering.')
    group.add_argument('--openai-gelu', action='store_true',
                       help='Use OpenAI\'s GeLU implementation. This option '
                       'should not be used except for backward compatibility '
                       'reasons.')
    group.add_argument('--onnx-safe', type=bool, required=False,
                       help='Use workarounds for known problems with Torch ONNX exporter')
    group.add_argument('--bert-no-binary-head', action='store_false',
                       help='Disable BERT binary head.',
                       dest='bert_binary_head')

    return parser


def _add_regularization_args(parser):
    group = parser.add_argument_group(title='regularization')

    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='Post attention dropout probability.')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='Dropout probability for the transformer '
                       'hidden state.')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='Weight decay coefficient for L2 regularization.')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='Gradient clipping based on global L2 norm.')
    group.add_argument('--adam-beta1', type=float, default=0.9,
                       help='First coefficient for computing running '
                       'averages of gradient and its square.')
    group.add_argument('--adam-beta2', type=float, default=0.999,
                       help='Second coefficient for computing running '
                       'averages of gradient and its square.')
    group.add_argument('--adam-eps', type=float, default=1e-08,
                       help='Term added to the denominator to improve '
                       'numerical stability.')
    group.add_argument('--sgd-momentum', type=float, default=0.9,
                       help='Momentum factor for SGD.')

    return parser


def _add_training_args(parser):
    group = parser.add_argument_group(title='training')

    group.add_argument('--micro-batch-size', type=int, default=None,
                       help='Batch size per model instance (local batch size). '
                       'Global batch size is local batch size times data '
                       'parallel size times number of micro batches.')
    group.add_argument('--batch-size', type=int, default=None,
                       help='Old batch size parameter, do not use. '
                       'Use --micro-batch-size instead')
    group.add_argument('--global-batch-size', type=int, default=None,
                       help='Training batch size. If set, it should be a '
                       'multiple of micro-batch-size times data-parallel-size. '
                       'If this value is None, then '
                       'use micro-batch-size * data-parallel-size as the '
                       'global batch size. This choice will result in 1 for '
                       'number of micro-batches.')
    # (An illustrative computation of this schedule is sketched after this
    # function.)
    group.add_argument('--rampup-batch-size', nargs='*', default=None,
                       help='Batch size ramp-up with the following values: '
                       ' --rampup-batch-size <start batch size> '
                       '                     <batch size increment> '
                       '                     <ramp-up samples> '
                       'For example: '
                       '  --rampup-batch-size 16 8 300000 '
                       '  --global-batch-size 1024 '
                       'will start with global batch size 16 and over '
                       '(1024 - 16) / 8 = 126 intervals will increase '
                       'the batch size linearly to 1024. In each interval '
                       'we will use approximately 300000 / 126 = 2380 '
                       'samples.')
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='Checkpoint activation to allow for training '
                       'with larger models, sequences, and batch sizes.')
    group.add_argument('--distribute-checkpointed-activations',
                       action='store_true',
                       help='If set, distribute checkpointed activations '
                       'across model parallel group.')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='chunk size (number of layers) for checkpointing.')
    group.add_argument('--train-iters', type=int, default=None,
                       help='Total number of iterations to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--train-samples', type=int, default=None,
                       help='Total number of samples to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--log-interval', type=int, default=100,
                       help='Report loss and timing interval.')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program after the iteration is divisible '
                       'by this value.')
    group.add_argument('--exit-duration-in-mins', type=int, default=None,
                       help='Exit the program after this many minutes.')
    group.add_argument('--tensorboard-dir', type=str, default=None,
                       help='Write TensorBoard logs to this directory.')
    group.add_argument('--no-masked-softmax-fusion',
                       action='store_false',
                       help='Disable fusion of query_key_value scaling, '
                       'masking, and softmax.',
                       dest='masked_softmax_fusion')
    group.add_argument('--no-bias-gelu-fusion', action='store_false',
                       help='Disable bias and gelu fusion.',
                       dest='bias_gelu_fusion')
    group.add_argument('--no-bias-dropout-fusion', action='store_false',
                       help='Disable bias and dropout fusion.',
                       dest='bias_dropout_fusion')
    group.add_argument('--optimizer', type=str, default='adam',
                       choices=['adam', 'sgd'],
                       help='Optimizer function')
    group.add_argument('--dataloader_type', type=str, default='single',
                       choices=['single', 'cyclic'],
                       help='Single pass vs multiple pass data loader')
    return parser
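

# Illustrative sketch of the --rampup-batch-size schedule described above.
# This helper is an assumption for illustration only; it is not part of
# Megatron, whose real schedule is computed by the training loop.
def _example_rampup_batch_size(consumed_samples, start, increment,
                               rampup_samples, global_batch_size):
    """Return the ramped-up global batch size after `consumed_samples`."""
    # Number of batch-size increments between `start` and the target.
    assert (global_batch_size - start) % increment == 0
    num_increments = (global_batch_size - start) // increment
    # Samples consumed before each increment, e.g. 300000 / 126 ~= 2380.
    samples_per_increment = rampup_samples / num_increments
    steps = int(consumed_samples / samples_per_increment)
    return min(global_batch_size, start + steps * increment)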


def _add_initialization_args(parser):
    group = parser.add_argument_group(title='initialization')

    group.add_argument('--seed', type=int, default=1234,
                       help='Random seed used for python, numpy, '
                       'pytorch, and cuda.')
    group.add_argument('--init-method-std', type=float, default=0.02,
                       help='Standard deviation of the zero mean normal '
                       'distribution used for weight initialization.')
    group.add_argument('--init-method-xavier-uniform', action='store_true',
                       help='Enable Xavier uniform parameter initialization')

    return parser


def _add_learning_rate_args(parser):
    group = parser.add_argument_group(title='learning rate')

    group.add_argument('--lr', type=float, default=None,
                       help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learning rate at each '
                       'iteration would be different.')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine'],
                       help='Learning rate decay function.')
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='Number of iterations to decay learning rate '
                       'over. If None, defaults to `--train-iters`.')
    group.add_argument('--lr-decay-samples', type=int, default=None,
                       help='Number of samples to decay learning rate '
                       'over. If None, defaults to `--train-samples`.')
    group.add_argument('--lr-warmup-fraction', type=float, default=None,
                       help='Fraction of lr-warmup-(iters/samples) to use '
                       'for warmup (as a float).')
    group.add_argument('--lr-warmup-iters', type=int, default=0,
                       help='Number of iterations to linearly warm up '
                       'the learning rate over.')
    group.add_argument('--lr-warmup-samples', type=int, default=0,
                       help='Number of samples to linearly warm up '
                       'the learning rate over.')
    group.add_argument('--warmup', type=int, default=None,
                       help='Old lr warmup argument, do not use. Use one of the '
                       '--lr-warmup-* arguments above')
    group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for learning rate. The scheduler '
                       'clips values below this threshold.')
    group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning '
                       'rate, warmup iterations, minimum learning rate, '
                       'maximum number of iterations, and decay style) '
                       'from input arguments and ignore values from '
                       'checkpoints. Note that all the above values will '
                       'be reset.')
    group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
                       help='Use checkpoint to set the values of the '
                       'scheduler (learning rate, warmup iterations, '
                       'minimum learning rate, maximum number of '
                       'iterations, and decay style) from checkpoint '
                       'and ignore input arguments.')

    return parser
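

# Illustrative sketch of the --lr-decay-style choices above, assuming a
# linear warmup. This helper is an assumption for illustration only; the
# actual schedule is implemented by Megatron's learning rate scheduler.
def _example_lr(iteration, max_lr, min_lr, warmup_iters, decay_iters, style):
    """Return the learning rate at `iteration` for a given decay style."""
    import math
    if warmup_iters > 0 and iteration < warmup_iters:
        # Linear warmup from 0 to max_lr.
        return max_lr * iteration / warmup_iters
    if style == 'constant':
        return max_lr
    # Fraction of the decay window completed so far, clipped to [0, 1].
    progress = min(1.0, (iteration - warmup_iters) /
                   max(1, decay_iters - warmup_iters))
    if style == 'linear':
        coeff = 1.0 - progress
    else:  # 'cosine'
        coeff = 0.5 * (math.cos(math.pi * progress) + 1.0)
    return min_lr + (max_lr - min_lr) * coeff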


def _add_checkpointing_args(parser):
    group = parser.add_argument_group(title='checkpointing')

    group.add_argument('--save', type=str, default=None,
                       help='Output directory to save checkpoints to.')
    group.add_argument('--save-interval', type=int, default=None,
                       help='Number of iterations between checkpoint saves.')
    group.add_argument('--no-save-optim', action='store_true',
                       help='Do not save current optimizer.')
    group.add_argument('--no-save-rng', action='store_true',
                       help='Do not save current rng state.')
    group.add_argument('--load', type=str, default=None,
                       help='Directory containing a model checkpoint.')
    group.add_argument('--no-load-optim', action='store_true', default=None,
                       help='Do not load optimizer when loading checkpoint.')
    group.add_argument('--no-load-rng', action='store_true', default=None,
                       help='Do not load rng state when loading checkpoint.')
    group.add_argument('--finetune', action='store_true',
                       help='Load model for finetuning. Do not load optimizer '
                       'or rng state from checkpoint and set iteration to 0. '
                       'Assumed when loading a release checkpoint.')

    return parser


def _add_mixed_precision_args(parser):
    group = parser.add_argument_group(title='mixed precision')

    group.add_argument('--fp16', action='store_true',
                       help='Run model in fp16 mode.')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling, positive power of 2 '
                       'values can improve fp16 convergence. If None, '
                       'dynamic loss scaling is used.')
    group.add_argument('--initial-loss-scale', type=float, default=2**32,
                       help='Initial loss-scale for dynamic loss scaling.')
    group.add_argument('--min-loss-scale', type=float, default=1.0,
                       help='Minimum loss scale for dynamic loss scaling.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale.')
    group.add_argument('--hysteresis', type=int, default=2,
                       help='Hysteresis for dynamic loss scaling.')
    group.add_argument('--fp32-residual-connection', action='store_true',
                       help='Move residual connections to fp32.')
    group.add_argument('--no-query-key-layer-scaling', action='store_false',
                       help='Do not scale Q * K^T by 1 / layer-number.',
                       dest='apply_query_key_layer_scaling')
    group.add_argument('--attention-softmax-in-fp32', action='store_true',
                       help='Run attention masking and softmax in fp32. '
                       'This flag is ignored unless '
                       '--no-query-key-layer-scaling is specified.')
    group.add_argument('--fp32-allreduce', action='store_true',
                       help='All-reduce in fp32')
    group.add_argument('--fp16-lm-cross-entropy', action='store_true',
                       help='Move the cross entropy unreduced loss '
                       'calculation for lm head to fp16.')

    return parser


def _add_distributed_args(parser):
    group = parser.add_argument_group(title='distributed')

    group.add_argument('--tensor-model-parallel-size', type=int, default=1,
                       help='Degree of tensor model parallelism.')
    group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
                       help='Degree of pipeline model parallelism.')
    group.add_argument('--model-parallel-size', type=int, default=None,
                       help='Old model parallel argument, do not use. Use '
                       '--tensor-model-parallel-size instead.')
    group.add_argument('--distributed-backend', default='nccl',
                       choices=['nccl', 'gloo'],
                       help='Which backend to use for distributed training.')
    group.add_argument('--DDP-impl', default='local',
                       choices=['local', 'torch'],
                       help='Which DistributedDataParallel implementation '
                       'to use.')
    group.add_argument('--local_rank', type=int, default=None,
                       help='local rank passed from distributed launcher.')
    group.add_argument('--lazy-mpu-init', type=bool, required=False,
                       help='If set to True, initialize_megatron() skips '
                       'DDP initialization and returns a function to '
                       'complete it instead. Also turns on the '
                       '--use-cpu-initialization flag. This is for an '
                       'external DDP manager.')
    group.add_argument('--use-cpu-initialization', action='store_true',
                       default=None,
                       help='If set, affine parallel weights initialization '
                       'uses CPU.')
    return parser


def _add_validation_args(parser):
    group = parser.add_argument_group(title='validation')

    group.add_argument('--eval-iters', type=int, default=100,
                       help='Number of iterations to run for evaluation '
                       '(validation/test).')
    group.add_argument('--eval-interval', type=int, default=1000,
                       help='Interval between running evaluation on '
                       'validation set.')

    return parser


def _add_data_args(parser):
    group = parser.add_argument_group(title='data and dataloader')

    group.add_argument('--data-path', nargs='*', default=None,
                       help='Path to the training dataset. Accepted formats: '
                       '1) a single data path, 2) multiple datasets in the '
                       'form: dataset1-weight dataset1-path dataset2-weight '
                       'dataset2-path ...')
    # (A sketch of normalizing this value into fractions is given after
    # this function.)
    group.add_argument('--split', type=str, default='969, 30, 1',
                       help='Comma-separated list of proportions for training,'
                       ' validation, and test split. For example the split '
                       '`90,5,5` will use 90%% of data for training, 5%% for '
                       'validation and 5%% for test.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file.')
    group.add_argument('--merge-file', type=str, default=None,
                       help='Path to the BPE merge file.')
    group.add_argument('--seq-length', type=int, default=None,
                       help='Maximum sequence length to process.')
    group.add_argument('--encoder-seq-length', type=int, default=None,
                       help='Maximum encoder sequence length to process. '
                       'This should be exclusive of --seq-length.')
    group.add_argument('--decoder-seq-length', type=int, default=None,
                       help="Maximum decoder sequence length to process.")
    group.add_argument('--mask-prob', type=float, default=0.15,
                       help='Probability of replacing a token with mask.')
    group.add_argument('--short-seq-prob', type=float, default=0.1,
                       help='Probability of producing a short sequence.')
    group.add_argument('--mmap-warmup', action='store_true',
                       help='Warm up mmap files.')
    group.add_argument('--num-workers', type=int, default=2,
                       help="Dataloader number of workers.")
    group.add_argument('--tokenizer-type', type=str,
                       default=None,
                       choices=['BertWordPieceLowerCase',
                                'BertWordPieceCase',
                                'GPT2BPETokenizer'],
                       help='What type of tokenizer to use.')
    group.add_argument('--data-impl', type=str, default='infer',
                       choices=['lazy', 'cached', 'mmap', 'infer'],
                       help='Implementation of indexed datasets.')
    group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
    group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention mask after '
                       'end-of-document token.')
    group.add_argument('--eod-mask-loss', action='store_true',
                       help='Mask loss for the end of document tokens.')

    return parser
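

# Illustrative sketch of how a --split value such as '969, 30, 1' can be
# normalized into train/validation/test fractions. This helper is an
# assumption for illustration only; Megatron's dataset code performs its
# own split handling.
def _example_parse_split(split):
    """Normalize a comma-separated --split string into fractions."""
    weights = [float(s) for s in split.split(',')]
    total = sum(weights)
    # '969, 30, 1' -> [0.969, 0.03, 0.001]
    return [w / total for w in weights]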


def _add_autoresume_args(parser):
    group = parser.add_argument_group(title='autoresume')

    group.add_argument('--adlr-autoresume', action='store_true',
                       help='Enable autoresume on adlr cluster.')
    group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
                       help='Interval over which to check for an autoresume '
                       'termination signal.')

    return parser


def _add_realm_args(parser):
    group = parser.add_argument_group(title='realm')

    # network size
    group.add_argument('--ict-head-size', type=int, default=None,
                       help='Size of block embeddings to be used in ICT and REALM (paper default: 128)')

    # checkpointing
    group.add_argument('--ict-load', type=str, default=None,
                       help='Directory containing an ICTBertModel checkpoint')
    group.add_argument('--bert-load', type=str, default=None,
                       help='Directory containing a BertModel checkpoint '
                       '(needed to start ICT and REALM)')

    # data
    group.add_argument('--titles-data-path', type=str, default=None,
                       help='Path to titles dataset used for ICT')
    group.add_argument('--query-in-block-prob', type=float, default=0.1,
                       help='Probability of keeping query in block for ICT dataset')
    group.add_argument('--use-one-sent-docs', action='store_true',
                       help='Whether to use one sentence documents in ICT')

    # training
    group.add_argument('--report-topk-accuracies', nargs='+', default=[],
                       help="Which top-k accuracies to report (e.g. '1 5 20')")

    # faiss index
    group.add_argument('--faiss-use-gpu', action='store_true',
                       help='Whether to create the FaissMIPSIndex on GPU')
    group.add_argument('--block-data-path', type=str, default=None,
                       help='Where to save/load BlockData to/from')

    # indexer
    group.add_argument('--indexer-batch-size', type=int, default=128,
                       help='Batch size to use when running indexing jobs')
    group.add_argument('--indexer-log-interval', type=int, default=1000,
                       help='After how many batches the indexer should '
                       'report progress')
    return parser


def _add_vit_args(parser):
    group = parser.add_argument_group(title="vit")

    group.add_argument('--num-classes', type=int, default=1000,
                       help='Number of classes in the vision '
                       'classification task')
    group.add_argument('--img-dim', type=int, default=224,
                       help='Image size for the vision classification task')
    group.add_argument('--num-channels', type=int, default=3,
                       help='Number of channels in input image data')
    group.add_argument('--patch-dim', type=int, default=16,
                       help='Patch dimension used in ViT')

    return parser