# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron arguments."""

import argparse
import os


def parse_args(extra_args_provider=None, defaults={},
               ignore_unknown_args=False):
    """Parse all arguments."""
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments')

    # Standard arguments.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)

    # Custom arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)

    # Parse.
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()

    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        if getattr(args, key) is not None:
            if args.rank == 0:
                print('WARNING: overriding default arguments for '
                      '{key}:{v} with {key}:{v2}'.format(
                          key=key, v=defaults[key], v2=getattr(args, key)),
                      flush=True)
        else:
            setattr(args, key, defaults[key])

    # Check required arguments.
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads',
                     'max_position_embeddings']
    for req_arg in required_args:
        _check_arg_is_not_none(args, req_arg)

    # Distributed args.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv('WORLD_SIZE', '1'))
    args.model_parallel_size = min(args.model_parallel_size, args.world_size)
    if args.rank == 0:
        print('using world size: {} and model-parallel size: {} '.format(
            args.world_size, args.model_parallel_size))
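    # The resulting data-parallel size is world_size // model_parallel_size,
    # e.g. a world size of 8 with --model-parallel-size 2 gives a
    # data-parallel size of 4.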

    # Fp16 loss scaling.
    args.dynamic_loss_scale = False
    if args.loss_scale is None:
        args.dynamic_loss_scale = True
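    # For example, `--loss-scale 4096` keeps a fixed scale for the whole run,
    # while leaving the flag unset (the default, None) selects dynamic
    # loss scaling.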

    # Checks.
    assert args.hidden_size % args.num_attention_heads == 0
    if args.seq_length is not None:
        assert args.max_position_embeddings >= args.seq_length
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None

    _print_args(args)
    return args
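
# Illustrative usage (a sketch; `my_extra_args` and the default shown below
# are assumptions for the example, not part of this module):
#
#     def my_extra_args(parser):
#         group = parser.add_argument_group(title='my extension')
#         group.add_argument('--my-flag', action='store_true')
#         return parser
#
#     args = parse_args(extra_args_provider=my_extra_args,
#                       defaults={'tokenizer_type': 'GPT2BPETokenizer'})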


def _print_args(args):
    """Print arguments."""
    if args.rank == 0:
        print('-------------------- arguments --------------------', flush=True)
        str_list = []
        for arg in vars(args):
            dots = '.' * (32 - len(arg))
            str_list.append('  {} {} {}'.format(arg, dots, getattr(args, arg)))
        for arg in sorted(str_list, key=lambda x: x.lower()):
            print(arg, flush=True)
        print('---------------- end of arguments ----------------', flush=True)


def _check_arg_is_not_none(args, arg):
    assert getattr(args, arg) is not None, '{} argument is None'.format(arg)


def _add_network_size_args(parser):
    group = parser.add_argument_group(title='network size')

    group.add_argument('--num-layers', type=int, default=None,
                       help='Number of transformer layers.')
    group.add_argument('--hidden-size', type=int, default=None,
                       help='Transformer hidden size.')
    group.add_argument('--num-attention-heads', type=int, default=None,
                       help='Number of transformer attention heads.')
    group.add_argument('--max-position-embeddings', type=int, default=None,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of the position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value. '
                       'This is added for computational efficiency reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use the original BERT residual connection '
                       'ordering.')
    group.add_argument('--openai-gelu', action='store_true',
                       help='Use OpenAI\'s GeLU implementation. This option '
                       'should not be used except for backward compatibility '
                       'reasons.')

    return parser


def _add_regularization_args(parser):
    group = parser.add_argument_group(title='regularization')

    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='Post-attention dropout probability.')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='Dropout probability for the transformer hidden state.')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='Weight decay coefficient for L2 regularization.')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='Gradient clipping based on global L2 norm.')

    return parser


def _add_training_args(parser):
    group = parser.add_argument_group(title='training')

    group.add_argument('--batch-size', type=int, default=None,
                       help='Batch size per model instance (local batch size). '
                       'Global batch size is local batch size times data '
                       'parallel size.')
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='Checkpoint activations to allow for training '
                       'with larger models, sequences, and batch sizes.')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='Chunk size (number of layers) for checkpointing.')
    group.add_argument('--train-iters', type=int, default=None,
                       help='Total number of iterations to train over all '
                       'training runs.')
    group.add_argument('--log-interval', type=int, default=100,
                       help='Interval between loss and timing reports.')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program when the iteration number is '
                       'divisible by this value.')
    group.add_argument('--tensorboard-dir', type=str, default=None,
                       help='Write TensorBoard logs to this directory.')

    return parser


def _add_initialization_args(parser):
    group = parser.add_argument_group(title='initialization')

    group.add_argument('--seed', type=int, default=1234,
                       help='Random seed used for Python, NumPy, '
                       'PyTorch, and CUDA.')
    group.add_argument('--init-method-std', type=float, default=0.02,
                       help='Standard deviation of the zero mean normal '
                       'distribution used for weight initialization.')

    return parser


def _add_learning_rate_args(parser):
    group = parser.add_argument_group(title='learning rate')

    group.add_argument('--lr', type=float, default=None,
                       help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learning rate at each '
                       'iteration would be different.')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine', 'exponential'],
                       help='Learning rate decay function.')
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='Number of iterations to decay learning rate over. '
                       'If None, defaults to `--train-iters`.')
    group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for learning rate. The scheduler '
                       'clips values below this threshold.')
    group.add_argument('--warmup', type=float, default=0.01,
                       help='Percentage of total iterations to warmup on '
                       '(.01 = 1 percent of all training iters).')
    group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning rate, '
                       'warmup iterations, minimum learning rate, maximum '
                       'number of iterations, and decay style) from input '
                       'arguments and ignore values from checkpoints. Note '
                       'that all the above values will be reset.')
    group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
                       help='Use checkpoint to set the values of the scheduler '
                       '(learning rate, warmup iterations, minimum learning '
                       'rate, maximum number of iterations, and decay style) '
                       'from checkpoint and ignore input arguments.')

    return parser


def _add_checkpointing_args(parser):
    group = parser.add_argument_group(title='checkpointing')

    group.add_argument('--save', type=str, default=None,
                       help='Output directory to save checkpoints to.')
    group.add_argument('--save-interval', type=int, default=None,
                       help='Number of iterations between checkpoint saves.')
    group.add_argument('--no-save-optim', action='store_true',
                       help='Do not save current optimizer.')
    group.add_argument('--no-save-rng', action='store_true',
                       help='Do not save current rng state.')
    group.add_argument('--load', type=str, default=None,
                       help='Directory containing a model checkpoint.')
    group.add_argument('--no-load-optim', action='store_true',
                       help='Do not load optimizer when loading checkpoint.')
    group.add_argument('--no-load-rng', action='store_true',
                       help='Do not load rng state when loading checkpoint.')
    group.add_argument('--finetune', action='store_true',
                       help='Load model for finetuning. Do not load optimizer '
                       'or rng state from checkpoint and set iteration to 0. '
                       'Assumed when loading a release checkpoint.')

    return parser


def _add_mixed_precision_args(parser):
    group = parser.add_argument_group(title='mixed precision')

    group.add_argument('--fp16', action='store_true',
                       help='Run model in fp16 mode.')
    group.add_argument('--apply-query-key-layer-scaling', action='store_true',
                       help='Scale Q * K^T by 1 / layer-number. If this flag '
                       'is set, then it will automatically set '
                       'attention-softmax-in-fp32 to True.')
    group.add_argument('--attention-softmax-in-fp32', action='store_true',
                       help='Run attention masking and softmax in fp32.')
    group.add_argument('--fp32-allreduce', action='store_true',
                       help='All-reduce in fp32.')
    group.add_argument('--hysteresis', type=int, default=2,
                       help='Hysteresis for dynamic loss scaling.')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling; positive power-of-2 '
                       'values can improve fp16 convergence. If None, dynamic '
                       'loss scaling is used.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale.')
    group.add_argument('--min-scale', type=float, default=1,
                       help='Minimum loss scale for dynamic loss scale.')

    return parser


def _add_distributed_args(parser):
    group = parser.add_argument_group(title='distributed')

    group.add_argument('--model-parallel-size', type=int, default=1,
                       help='Size of the model-parallel group.')
    group.add_argument('--distributed-backend', default='nccl',
                       choices=['nccl', 'gloo'],
                       help='Which backend to use for distributed training.')
    group.add_argument('--DDP-impl', default='local',
                       choices=['local', 'torch'],
                       help='Which DistributedDataParallel implementation '
                       'to use.')
    group.add_argument('--local_rank', type=int, default=None,
                       help='Local rank passed from the distributed launcher.')

    return parser


def _add_validation_args(parser):
    group = parser.add_argument_group(title='validation')

    group.add_argument('--eval-iters', type=int, default=100,
                       help='Number of iterations to run for evaluation on '
                       'the validation/test sets.')
    group.add_argument('--eval-interval', type=int, default=1000,
                       help='Interval between running evaluation on '
                       'validation set.')

    return parser


def _add_data_args(parser):
    group = parser.add_argument_group(title='data and dataloader')

    group.add_argument('--data-path', type=str, default=None,
                       help='Path to combined dataset to split.')
    group.add_argument('--split', type=str, default='969, 30, 1',
                       help='Comma-separated list of proportions for training, '
                       'validation, and test split. For example, the split '
                       '`90,5,5` will use 90%% of the data for training, 5%% '
                       'for validation, and 5%% for test.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file.')
    group.add_argument('--merge-file', type=str, default=None,
                       help='Path to the BPE merge file.')
    group.add_argument('--seq-length', type=int, default=None,
                       help="Maximum sequence length to process.")
    group.add_argument('--mask-prob', type=float, default=0.15,
                       help='Probability of replacing a token with mask.')
    group.add_argument('--short-seq-prob', type=float, default=0.1,
                       help='Probability of producing a short sequence.')
    group.add_argument('--mmap-warmup', action='store_true',
                       help='Warm up mmap files.')
    group.add_argument('--num-workers', type=int, default=2,
                       help="Dataloader number of workers.")
    group.add_argument('--tokenizer-type', type=str,
                       default=None,
                       choices=['BertWordPieceLowerCase',
                                'BertWordPieceCase',
                                'GPT2BPETokenizer'],
                       help='What type of tokenizer to use.')
    group.add_argument('--data-impl', type=str, default='infer',
                       choices=['lazy', 'cached', 'mmap', 'infer'],
                       help='Implementation of indexed datasets.')
    group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
    group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self-attention mask after '
                       'end-of-document token.')
    group.add_argument('--eod-mask-loss', action='store_true',
                       help='Mask loss for the end of document tokens.')

    return parser


def _add_autoresume_args(parser):
    group = parser.add_argument_group(title='autoresume')

    group.add_argument('--adlr-autoresume', action='store_true',
                       help='Enable autoresume on the ADLR cluster.')
    group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
                       help='Interval over which to check for the autoresume '
                       'termination signal.')

    return parser