# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron arguments."""

import argparse
import os


def parse_args(extra_args_provider=None, defaults={}):
    """Parse all arguments."""
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments')

    # Standard arguments.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)
    # TODO: Refactor
    parser = _add_gpt2_args(parser)

    # Custom arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)

    # Parse.
    args = parser.parse_args()

    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        assert getattr(args, key) is None, \
            'defaults can only be overwritten for args with None values.'
        setattr(args, key, defaults[key])

    # Distributed args.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    args.model_parallel_size = min(args.model_parallel_size, args.world_size)
    if args.rank == 0:
        print('using world size: {} and model-parallel size: {} '.format(
            args.world_size, args.model_parallel_size))

    # Fp16 loss scaling.
    args.dynamic_loss_scale = False
    if args.loss_scale is None:
        args.dynamic_loss_scale = True

    # Checks.
    assert args.hidden_size % args.num_attention_heads == 0
    if args.seq_length is not None:
        assert args.max_position_embeddings >= args.seq_length
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None

    _print_args(args)
    return args
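

# Example usage (illustrative; not part of the original interface): a caller
# can inject task-specific arguments and fill in unset (None) defaults:
#
#     def extra_args(parser):
#         group = parser.add_argument_group(title='my task')
#         group.add_argument('--my-flag', action='store_true')
#         return parser
#
#     args = parse_args(extra_args_provider=extra_args,
#                       defaults={'seq_length': 1024})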


def _print_args(args):
    """Print arguments."""
    if args.rank == 0:
        print('-------------------- arguments --------------------', flush=True)
        str_list = []
        for arg in vars(args):
            dots = '.' * (32 - len(arg))
            str_list.append('  {} {} {}'.format(arg, dots, getattr(args, arg)))
        for arg in sorted(str_list, key=lambda x: x.lower()):
            print(arg, flush=True)
        print('---------------- end of arguments ----------------', flush=True)
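

# Example output (illustrative values), printed only on rank 0:
#
#     -------------------- arguments --------------------
#       batch_size ...................... 8
#       checkpoint_activations .......... False
#     ---------------- end of arguments ----------------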


def _add_network_size_args(parser):
    group = parser.add_argument_group(title='network size')

    group.add_argument('--num-layers', type=int, required=True,
                       help='Number of transformer layers.')
    group.add_argument('--hidden-size', type=int, required=True,
                       help='Transformer hidden size.')
    group.add_argument('--num-attention-heads', type=int, required=True,
                       help='Number of transformer attention heads.')
    group.add_argument('--max-position-embeddings', type=int, required=True,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of the position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this '
                       'value. This is added for computational efficiency '
                       'reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use the original BERT residual '
                       'connection ordering.')

    return parser
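

# A minimal invocation must supply the four required sizes above, e.g.
# (illustrative values, with the training script assumed to be named
# pretrain_gpt2.py):
#
#     python pretrain_gpt2.py --num-layers 24 --hidden-size 1024 \
#         --num-attention-heads 16 --max-position-embeddings 1024 ...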


def _add_regularization_args(parser):
    group = parser.add_argument_group(title='regularization')

    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='Post attention dropout probability.')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='Dropout probability for the transformer hidden '
                       'state.')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='Weight decay coefficient for L2 regularization.')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='Gradient clipping based on global L2 norm.')

    return parser


def _add_training_args(parser):
    group = parser.add_argument_group(title='training')

    group.add_argument('--batch-size', type=int, default=None,
                       help='Batch size per model instance (local batch size). '
                       'Global batch size is local batch size times data '
                       'parallel size.')
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='Checkpoint activation to allow for training '
                       'with larger models, sequences, and batch sizes.')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='Chunk size (number of layers) for checkpointing.')
    group.add_argument('--train-iters', type=int, default=None,
                       help='Total number of iterations to train over all '
                       'training runs.')
    group.add_argument('--log-interval', type=int, default=100,
                       help='Interval (in iterations) between loss and '
                       'timing reports.')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program when the iteration count is '
                       'divisible by this value.')
    group.add_argument('--tensorboard-dir', type=str, default=None,
                       help='Write TensorBoard logs to this directory.')

    return parser


def _add_initialization_args(parser):
    group = parser.add_argument_group(title='initialization')

    group.add_argument('--seed', type=int, default=1234,
                       help='Random seed used for python, numpy, '
                       'pytorch, and cuda.')
    group.add_argument('--init-method-std', type=float, default=0.02,
                       help='Standard deviation of the zero mean normal '
                       'distribution used for weight initialization.')

    return parser


def _add_learning_rate_args(parser):
    group = parser.add_argument_group(title='learning rate')

    group.add_argument('--lr', type=float, default=None,
                       help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learning rate at each '
                       'iteration would be different.')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine', 'exponential'],
                       help='Learning rate decay function.')
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='Number of iterations to decay the learning '
                       'rate over. If None, defaults to `--train-iters`.')
    group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for the learning rate. The '
                       'scheduler clips values below this threshold.')
    group.add_argument('--warmup', type=float, default=0.01,
                       help='Percentage of total iterations to warmup on '
                       '(.01 = 1 percent of all training iters).')
    group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning '
                       'rate, warmup iterations, minimum learning rate, '
                       'maximum number of iterations, and decay style) from '
                       'the input arguments and ignore values from '
                       'checkpoints. Note that all the above values will be '
                       'reset.')
    group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
                       help='Use checkpoint to set the values of the scheduler '
                       '(learning rate, warmup iterations, minimum learning '
                       'rate, maximum number of iterations, and decay '
                       'style) from the checkpoint and ignore input '
                       'arguments.')

    return parser
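

# Example (illustrative): `--warmup 0.01 --train-iters 500000` warms the
# learning rate up over roughly the first 5,000 iterations and then decays
# it toward --min-lr according to --lr-decay-style.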


def _add_checkpointing_args(parser):
    group = parser.add_argument_group(title='checkpointing')

    group.add_argument('--save', type=str, default=None,
                       help='Output directory to save checkpoints to.')
    group.add_argument('--save-interval', type=int, default=None,
                       help='Number of iterations between checkpoint saves.')
    group.add_argument('--no-save-optim', action='store_true',
                       help='Do not save current optimizer.')
    group.add_argument('--no-save-rng', action='store_true',
                       help='Do not save current rng state.')
    group.add_argument('--load', type=str, default=None,
                       help='Directory containing a model checkpoint.')
    group.add_argument('--no-load-optim', action='store_true',
                       help='Do not load optimizer when loading checkpoint.')
    group.add_argument('--no-load-rng', action='store_true',
                       help='Do not load rng state when loading checkpoint.')
    group.add_argument('--finetune', action='store_true',
                       help='Load model for finetuning. Do not load optimizer '
                       'or rng state from checkpoint and set iteration to 0. '
                       'Assumed when loading a release checkpoint.')

    return parser
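

# Note: parse_args() asserts that --save-interval is provided whenever
# --save is given, so the two flags must be used together.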


def _add_mixed_precision_args(parser):
    group = parser.add_argument_group(title='mixed precision')

    group.add_argument('--fp16', action='store_true',
                       help='Run model in fp16 mode.')
    group.add_argument('--apply-query-key-layer-scaling', action='store_true',
                       help='Scale Q * K^T by 1 / layer-number. If this flag '
                       'is set, then it will automatically set '
                       'attention-softmax-in-fp32 to true.')
    group.add_argument('--attention-softmax-in-fp32', action='store_true',
                       help='Run attention masking and softmax in fp32.')
    group.add_argument('--fp32-allreduce', action='store_true',
                       help='All-reduce in fp32.')
    group.add_argument('--hysteresis', type=int, default=2,
                       help='Hysteresis for dynamic loss scaling.')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling; a positive power of 2 '
                       'can improve fp16 convergence. If None, dynamic '
                       'loss scaling is used.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale.')
    group.add_argument('--min-scale', type=float, default=1,
                       help='Minimum loss scale for dynamic loss scale.')

    return parser
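

# Note: passing a static scale, e.g. `--loss-scale 4096`, disables dynamic
# loss scaling; leaving the flag unset (None) enables it (see the
# dynamic_loss_scale logic in parse_args above).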


def _add_distributed_args(parser):
    group = parser.add_argument_group(title='distributed')

    group.add_argument('--model-parallel-size', type=int, default=1,
                       help='Degree of model parallelism.')
    group.add_argument('--distributed-backend', default='nccl',
                       choices=['nccl', 'gloo'],
                       help='Which backend to use for distributed training.')
    group.add_argument('--DDP-impl', default='local',
                       choices=['local', 'torch'],
                       help='Which DistributedDataParallel implementation '
                       'to use.')
    group.add_argument('--local_rank', type=int, default=None,
                       help='local rank passed from distributed launcher.')

    return parser
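

# Note: parse_args() reads the rank and world size from the RANK and
# WORLD_SIZE environment variables, which launchers such as
# `python -m torch.distributed.launch` set; that launcher also supplies
# --local_rank.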


def _add_validation_args(parser):
    group = parser.add_argument_group(title='validation')

    group.add_argument('--eval-iters', type=int, default=100,
                       help='Number of iterations to run for evaluation on '
                       'the validation/test sets.')
    group.add_argument('--eval-interval', type=int, default=1000,
                       help='Interval between running evaluation on '
                       'validation set.')

    return parser


def _add_data_args(parser):
    group = parser.add_argument_group(title='data and dataloader')

    group.add_argument('--data-path', type=str, default=None,
                       help='Path to combined dataset to split.')
    group.add_argument('--split', type=str, default='969, 30, 1',
                       help='Comma-separated list of proportions for '
                       'training, validation, and test split. For example '
                       'the split `90,5,5` will use 90%% of data for '
                       'training, 5%% for validation, and 5%% for test.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file.')
    group.add_argument('--merge-file', type=str, default=None,
                       help='Path to the BPE merge file.')
    group.add_argument('--seq-length', type=int, default=None,
                       help="Maximum sequence length to process.")
    group.add_argument('--mask-prob', type=float, default=0.15,
                       help='Probability of replacing a token with mask.')
    group.add_argument('--short-seq-prob', type=float, default=0.1,
                       help='Probability of producing a short sequence.')
    group.add_argument('--mmap-warmup', action='store_true',
                       help='Warm up mmap files.')
    group.add_argument('--num-workers', type=int, default=2,
                       help="Dataloader number of workers.")
    group.add_argument('--tokenizer-type', type=str,
                       default=None,
                       choices=['BertWordPieceLowerCase',
                                'GPT2BPETokenizer'],
                       help='What type of tokenizer to use.')
    group.add_argument('--data-impl', type=str, default='infer',
                       choices=['lazy', 'cached', 'mmap', 'infer'],
                       help='Implementation of indexed datasets.')
    group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
    group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self-attention mask after '
                       'end-of-document token.')
    group.add_argument('--eod-mask-loss', action='store_true',
                       help='Mask the loss for end-of-document tokens.')

    return parser


def _add_autoresume_args(parser):
    group = parser.add_argument_group(title='autoresume')

    group.add_argument('--adlr-autoresume', action='store_true',
                       help='Enable autoresume on adlr cluster.')
    group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
                       help='Interval (in iterations) at which to check for '
                       'the autoresume termination signal.')

    return parser


########################################################################


def _add_gpt2_args(parser):
    group = parser.add_argument_group(title='gpt2')

    group.add_argument('--input-data-sizes-file', type=str, default='sizes.txt',
                       help='File listing the sizes of all shards for the '
                       'numpy data loader.')

    return parser



def add_data_args_(parser):
    """Train/valid/test data arguments."""

    group = parser.add_argument_group('data', 'data configurations')

    group.add_argument('--data-loader', type=str, default=None,
                       choices=['raw', 'lazy', 'tfrecords', 'numpy', 'binary'],
                       help='Which data loader to use. Default varies by model.')

    return parser