# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron optimizer."""

from abc import ABC
from abc import abstractmethod
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP

from megatron import get_timers
from megatron import mpu
from megatron import print_rank_0
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.model import Float16Module
from megatron.utils import unwrap_model

from .clip_grads import clip_grad_norm_fp32, count_zeros_fp32


def _zero_grad_group_helper(group, set_to_none):
    """Zero out the gradient for a group of parameters.
    Note: copied from torch.optim.optimizer."""
    for param in group:
        if param.grad is not None:
            if set_to_none:
                param.grad = None
            else:
                if param.grad.grad_fn is not None:
                    param.grad.detach_()
                else:
                    param.grad.requires_grad_(False)
                param.grad.zero_()


def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None):
    """Use multi-tensor-applier to copy values from one list to another.
    We don't have a blfoat16 implementation so for now if the overflow_buf
    is not provided, we default back to simple loop copy to be compatible
    with bfloat16."""
    if overflow_buf:
        overflow_buf.fill_(0)
        # Scaling with factor `1.0` is equivalent to copy.
        multi_tensor_applier(amp_C.multi_tensor_scale,
                             overflow_buf,
                             [this, that],
                             1.0)
    else:
        for this_, that_ in zip(this, that):
            that_.copy_(this_)


class MegatronOptimizer(ABC):


    def __init__(self, optimizer, clip_grad,
                 log_num_zeros_in_grad,
                 params_have_main_grad,
                 use_contiguous_buffers_in_local_ddp,
                 models):

        """Input optimizer is the base optimizer for example Adam."""
        self.optimizer = optimizer
        assert self.optimizer, 'no optimizer is provided.'
        # Set gradient clipping and logging params.
        self.clip_grad = clip_grad
        self.log_num_zeros_in_grad = log_num_zeros_in_grad
        self.params_have_main_grad = params_have_main_grad
        self.use_contiguous_buffers_in_local_ddp = use_contiguous_buffers_in_local_ddp

        # 'models' are retained for access to the contiguous grad buffers.
        # (see distributed optimizer)
        self.models = models

        if self.use_contiguous_buffers_in_local_ddp:
            assert self.params_have_main_grad, \
                "use of contiguous buffer requires that params have main grad"

    def get_parameters(self):
        params = []
        for param_group in self.optimizer.param_groups:
            for param in param_group['params']:
                params.append(param)
        return params


    def get_model_parallel_group(self):
        '''Default returned here, but the distributed optimizer overrides this.'''
        return mpu.get_model_parallel_group()


    def clip_grad_norm(self, clip_grad):
        params = self.get_parameters()
        return clip_grad_norm_fp32(
            params, clip_grad,
            model_parallel_group=self.get_model_parallel_group())


    def count_zeros(self):
        params = self.get_parameters()
        return count_zeros_fp32(params,
                                model_parallel_group=self.get_model_parallel_group())


    @abstractmethod
    def zero_grad(self, set_to_none=True):
        pass


    @abstractmethod
    def get_loss_scale(self):
        """The output should be a cuda tensor of size 1."""
        pass


    def scale_loss(self, loss):
        """Simple scaling."""
        return self.get_loss_scale() * loss


    @abstractmethod
    def reload_model_params(self):
        """Refreshes any internal state from the current model parameters.
        Call whenever the parameters are changed outside of the optimizer.
        For example, when we load a model from a checkpoint  without loading
        the optimizer, the model parameters are updated but for fp16 optimizer
        with main parameters, the main parameters need to also be updated."""
        pass


    @abstractmethod
    def state_dict(self):
        pass


    @abstractmethod
    def load_state_dict(self, state_dict):
        pass


    # Promote state so it can be retrieved or set via
    # "optimizer_instance.state"
    def _get_state(self):
        return self.optimizer.state

    def _set_state(self, value):
        self.optimizer.state = value

    state = property(_get_state, _set_state)


    # Promote param_groups so it can be retrieved or set via
    # "optimizer_instance.param_groups"
    # (for example, to adjust the learning rate)
    def _get_param_groups(self):
        return self.optimizer.param_groups

    def _set_param_groups(self, value):
        self.optimizer.param_groups = value

    param_groups = property(_get_param_groups, _set_param_groups)


    @abstractmethod
    def step(self, args, timers):
        pass

    def gather_model_params(self, args, timers):
        '''For the case of a non-distributed-optimizer, there is nothing to
        do here.'''
        pass

    def allreduce_word_embedding_grads(self, args):
        '''
        All-reduce word embedding grads.

        Reduce grads across first and last stages to ensure that word_embeddings
        parameters stay in sync. This should only run for models that support
        pipelined model parallelism (BERT and GPT-2).
        '''
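        # (Context: with pipeline parallelism and tied input/output
        # embeddings, both the first and the last stage hold a copy of the
        # word embedding weight, and each copy only sees part of the
        # gradient, so the copies are summed here to keep them identical.)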

        if mpu.is_rank_in_embedding_group(ignore_virtual=True) and \
                mpu.get_pipeline_model_parallel_world_size() > 1:
            if mpu.is_pipeline_first_stage(ignore_virtual=True):
                unwrapped_model = self.models[0]
            elif mpu.is_pipeline_last_stage(ignore_virtual=True):
                unwrapped_model = self.models[-1]
            else:  # We do not support the interleaved schedule for T5 yet.
                unwrapped_model = self.models[0]
            unwrapped_model = unwrap_model(
                unwrapped_model, (torchDDP, LocalDDP, Float16Module))

            if unwrapped_model.share_word_embeddings:
                word_embeddings_weight = unwrapped_model.word_embeddings_weight()
                if args.DDP_impl == 'local':
                    grad = word_embeddings_weight.main_grad
                else:
                    grad = word_embeddings_weight.grad
                torch.distributed.all_reduce(grad, group=mpu.get_embedding_group())

    def allreduce_position_embedding_grads(self, args):
        '''
        All-reduce position_embeddings grad across first (encoder) and
        split (decoder) stages to ensure that position embeddings parameters
        stay in sync. This should only run for T5 models with pipeline
        parallelism.
        '''
        if mpu.is_rank_in_position_embedding_group() and \
                mpu.get_pipeline_model_parallel_world_size() > 1 and \
                args.pipeline_model_parallel_split_rank is not None:
            unwrapped_model = self.models[0]
            unwrapped_model = unwrap_model(
                unwrapped_model, (torchDDP, LocalDDP, Float16Module))
            assert args.DDP_impl == 'local', \
                'T5 model is only supported with local DDP mode'
            grad = unwrapped_model.language_model.embedding.position_embeddings.weight.main_grad
            torch.distributed.all_reduce(grad, group=mpu.get_position_embedding_group())

    def allreduce_embedding_grads(self, args):
        self.allreduce_word_embedding_grads(args)
        self.allreduce_position_embedding_grads(args)

    def reduce_model_grads(self, args, timers):
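        """All-reduce gradients before the optimizer step: first the regular
        data-parallel gradient all-reduce (for the local DDP implementation),
        then the all-reduce that keeps (tied) embedding gradients in sync
        across pipeline stages."""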

        # All-reduce if needed.
        if args.DDP_impl == 'local':
            timers('backward-params-all-reduce').start()
            for model in self.models:
                model.allreduce_gradients()
            timers('backward-params-all-reduce').stop()

        # All-reduce embedding grads.
        timers('backward-embedding-all-reduce').start()
        self.allreduce_embedding_grads(args)
        timers('backward-embedding-all-reduce').stop()


class MixedPrecisionOptimizer(MegatronOptimizer):

    def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad,
                 params_have_main_grad, use_contiguous_buffers_in_local_ddp,
                 bf16, grad_scaler,
                 models):

        super().__init__(
            optimizer, clip_grad, log_num_zeros_in_grad,
            params_have_main_grad, use_contiguous_buffers_in_local_ddp,
            models)

        self.bf16 = bf16
        self.grad_scaler = grad_scaler

        # None grad scaler is only supported for bf16.
        if self.grad_scaler is None:
            assert self.bf16, 'fp16 expects a grad scaler.'

        # Tensor used to determine if a nan/inf has happened.
        # Any non-zero value indicates inf/nan.
        # Note that found_inf is only allocated when a grad scaler is
        # provided (the grad scaler can be None for bf16); nan/inf is still
        # recorded when bfloat16 is used with a (constant) grad scaler.
        if self.grad_scaler:
            self.found_inf = torch.cuda.FloatTensor([0.0])

        # Dummy tensor needed for the apex multi-tensor applier.
        # For bfloat16, we don't have a multi-tensor apply implementation,
        # so for now we set it to None and the multi-tensor apply gets ignored.
        if bf16:
            self._dummy_overflow_buf = None
        else:
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])

        # In case grad scaler is not passed, define the unity scale.
        if self.grad_scaler is None:
            self._scale_one = torch.cuda.FloatTensor([1.0])


    def get_loss_scale(self):
        if self.grad_scaler is None:
            return self._scale_one
        return self.grad_scaler.scale


    def reload_model_params(self):
        self._copy_model_params_to_main_params()


    def _unscale_main_grads_and_check_for_nan(self):

        # Collect main grads.
        main_grads = self._collect_main_grad_data_for_unscaling()

        # Reset found inf.
        self.found_inf.fill_(0.0)

        # Unscale and set found inf/nan
        torch._amp_foreach_non_finite_check_and_unscale_(
            main_grads, self.found_inf, self.grad_scaler.inv_scale)

        # Update across all model parallel instances so that every rank
        # agrees on whether an inf/nan was found.
        torch.distributed.all_reduce(self.found_inf,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=self.get_model_parallel_group())

        # Check for nan.
        found_inf_flag = (self.found_inf.item() > 0)

        return found_inf_flag

    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    @classmethod
    def debug_base(cls, ITERATION, key, value):
        from megatron import get_args
        args = get_args()
        my_rank = torch.distributed.get_rank()
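        # NOTE: DEBUG_ITERATION is assumed to be defined at module scope when
        # these ad-hoc debug helpers are enabled; they are not part of the
        # normal training path.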
        if ITERATION != DEBUG_ITERATION:
            return
        for r in range(torch.distributed.get_world_size()):
            if my_rank == r:
                # prefix = "            + "
                prefix = ""
                print("%sbr/%s; [r%d, i%d]; %s, %.12e" % (prefix, "fix " if args.use_distributed_optimizer else "main", my_rank, ITERATION, key, value))
            torch.distributed.barrier()
        torch.distributed.barrier()
        # if my_rank == 0:
        #     raise Exception("debug.")
        # else:
        #     exit(0)
        exit(0)

    def debug_model(self, ITERATION, key, use_grad):
        use_grad = bool(use_grad)
        tensors = [
            (p.main_grad.float() if use_grad else p.float())
            for m in self.models for p in m.parameters()
        ]
        count = sum(t.nelement() for t in tensors)
        return self.debug_base(
            ITERATION,
            "model/%s, %s [count %d]" % (
                "grad" if use_grad else "param",
                key,
                count,
            ),
            # sum(torch.sum(torch.abs(t)) for t in tensors).item() / count,
            sum(torch.sum(torch.abs(t)) for t in tensors),
        )

    def debug_main(self, ITERATION, key, use_grad):
        use_grad = bool(use_grad)
        tensors = [
            p.grad if use_grad else p
            for g in self.optimizer.param_groups
            for p in g["params"]
        ]
        tensors = [ t.float() for t in tensors ]
        count = sum(t.nelement() for t in tensors)
        return self.debug_base(
            ITERATION,
            "main/%s, %s [count %d]" % (
                "grad" if use_grad else "param",
                key,
                count,
            ),
            sum(torch.sum(torch.abs(t)) for t in tensors),
        )
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

    @torch.no_grad()
    def step(self, args, timers):
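        """Run a single optimizer step.

        Returns a tuple (update_successful, grad_norm, num_zeros_in_grad);
        the last two entries are None when they are not computed, e.g. when
        the update is skipped because an inf/nan was found in the gradients.
        """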

        # Copy gradients from model params to main params.
        timers('optimizer-copy-to-main-grad').start()
        self._copy_model_grads_to_main_grads()
        timers('optimizer-copy-to-main-grad').stop()

        # Do unscale, check for inf, and update grad scaler only for
        # the case that grad scaler is provided.
        if self.grad_scaler:

            # Unscale and check for inf/nan.
            timers('optimizer-unscale-and-check-inf').start()
            found_inf_flag = self._unscale_main_grads_and_check_for_nan()
            timers('optimizer-unscale-and-check-inf').stop()

            # We are done with scaling gradients
            # so we can update the loss scale.
            self.grad_scaler.update(found_inf_flag)

            # If we found inf/nan, skip the update.
            if found_inf_flag:
                return False, None, None

        # Clip the main gradients.
        timers('optimizer-clip-main-grad').start()
        grad_norm = None
        if self.clip_grad > 0.0:
            grad_norm = self.clip_grad_norm(self.clip_grad)
        timers('optimizer-clip-main-grad').stop()

        # count the zeros in the grads
        num_zeros_in_grad = self.count_zeros() if \
                            self.log_num_zeros_in_grad else None

        # Step the optimizer.
        self.optimizer.step()

        # Update params from main params.
        timers('optimizer-copy-main-to-model-params').start()
        self._copy_main_params_to_model_params()
        timers('optimizer-copy-main-to-model-params').stop()

        # Successful update.
        return True, grad_norm, num_zeros_in_grad


class Float16OptimizerWithFloat16Params(MixedPrecisionOptimizer):
    """Float16 optimizer for fp16 and bf16 data types.

    Arguments:
        optimizer: base optimizer such as Adam or SGD
        clip_grad: clip gradients with this global L2 norm. Note
            that clipping is ignored if clip_grad == 0.
        log_num_zeros_in_grad: return the number of zeros in the gradients.
        params_have_main_grad: flag indicating whether parameters have
            a `main_grad` field. If this is set, we are assuming
            that the gradients of the model parameters are stored in the
            `main_grad` field instead of the typical `grad` field. This
            happens for the DDP cases where there is a contiguous buffer
            holding the gradients. For example, for bfloat16 we want
            to do gradient accumulation and all-reduces in float32,
            and as a result we store those gradients in `main_grad`.
            Note that main grad is not necessarily in float32.
        bf16: if true, the model is running in bfloat16.
        grad_scaler: used for scaling gradients. Note that this can be
            None. This case happens when `bf16 = True` and we don't
            use any loss scale. Note that for `bf16 = True`, we can have
            a constant gradient scaler. Also, for `bf16 = False`, we
            always require a grad scaler.
    """

    def __init__(self, optimizer, clip_grad, log_num_zeros_in_grad,
                 params_have_main_grad, use_contiguous_buffers_in_local_ddp,
                 bf16, grad_scaler, models):

        super().__init__(
            optimizer, clip_grad, log_num_zeros_in_grad,
            params_have_main_grad, use_contiguous_buffers_in_local_ddp,
            bf16, grad_scaler, models)

        # ======================
        # main parameter stuff
        # ======================

        # Three groups of parameters:
        #   float16_groups: original float16 parameters
        #   fp32_from_float16_groups: fp32 copy of float16 parameters
        #   fp32_from_fp32_groups: original fp32 parameters
        self.float16_groups = []
        self.fp32_from_float16_groups = []
        self.fp32_from_fp32_groups = []

        # For all the groups in the original optimizer:
        for param_group in self.optimizer.param_groups:
            float16_params_this_group = []
            fp32_params_this_group = []
            fp32_from_float16_params_this_group = []
            # For all the parameters in this group:
            for i, param in enumerate(param_group['params']):
                if param.requires_grad:

                    # float16 params:
                    if param.type() in ['torch.cuda.HalfTensor',
                                        'torch.cuda.BFloat16Tensor']:
                        float16_params_this_group.append(param)
                        # Create a copy
                        main_param = param.detach().clone().float()
                        # Copy tensor model parallel attributes.
                        mpu.copy_tensor_model_parallel_attributes(main_param,
                                                                  param)
                        if hasattr(param, 'shared'):
                            main_param.shared = param.shared
                        # Replace the optimizer params with the new fp32 copy.
                        param_group['params'][i] = main_param

                        fp32_from_float16_params_this_group.append(main_param)
                        # Reset existing state dict key to the new main param.
                        if param in self.optimizer.state:
                            self.optimizer.state[main_param] \
                                = self.optimizer.state.pop(param)

                    # fp32 params.
                    elif param.type() == 'torch.cuda.FloatTensor':
                        fp32_params_this_group.append(param)
                        param_group['params'][i] = param

                    else:
                        raise TypeError('Wrapped parameters must be one of '
                                        'torch.cuda.FloatTensor,  '
                                        'torch.cuda.HalfTensor, or '
                                        'torch.cuda.BFloat16Tensor. '
                                        'Received {}'.format(param.type()))

            self.float16_groups.append(float16_params_this_group)
            self.fp32_from_float16_groups.append(
                fp32_from_float16_params_this_group)
            self.fp32_from_fp32_groups.append(fp32_params_this_group)

        # Leverage state_dict() and load_state_dict() to
        # recast preexisting per-param state tensors
        self.optimizer.load_state_dict(self.optimizer.state_dict())


    def zero_grad(self, set_to_none=True):
        """We only need to zero the model related parameters, i.e.,
        float16_groups & fp32_from_fp32_groups. We additionally zero
        fp32_from_float16_groups as a memory optimization to reduce
        fragmentation; in the case of set_to_none==True, the space
        used by this field can be safely deallocated at this point."""
        for group in self.float16_groups:
            _zero_grad_group_helper(group, set_to_none)
        for group in self.fp32_from_float16_groups:
            _zero_grad_group_helper(group, set_to_none)
        for group in self.fp32_from_fp32_groups:
            _zero_grad_group_helper(group, set_to_none)


    def _collect_main_grad_data_for_unscaling(self):

        main_grads = []

        # fp32 params from float16 ones.
        for main_group in self.fp32_from_float16_groups:
            for main_param in main_group:
                if main_param.grad is not None:
                    main_grads.append(main_param.grad.data)

        # Append fp32 parameters.
        for main_group in self.fp32_from_fp32_groups:
            for main_param in main_group:
                if main_param.grad is not None:
                    main_grads.append(main_param.grad.data)
        
        return main_grads


    def _get_model_and_main_params_data_float16(self):
        model_data = []
        main_data = []
        for model_group, main_group in zip(self.float16_groups,
                                           self.fp32_from_float16_groups):
            for model_param, main_param in zip(model_group, main_group):
                model_data.append(model_param.data)
                main_data.append(main_param.data)
        return model_data, main_data


    def _copy_model_grads_to_main_grads(self):
        # This only needs to be done for the float16 group.
        for model_group, main_group in zip(self.float16_groups,
                                           self.fp32_from_float16_groups):
            for model_param, main_param in zip(model_group, main_group):
                if self.params_have_main_grad and hasattr(model_param, 'main_grad'):
                    main_param.grad = model_param.main_grad.float()
                else:
                    if model_param.grad is not None:
                        main_param.grad = model_param.grad.float()

                # Safe to deallocate model's grad/main_grad after copying.
                # (If using contiguous buffers, main_grad's memory should
                # persist and therefore should not be deallocated.)
                model_param.grad = None
                if self.params_have_main_grad and \
                   not self.use_contiguous_buffers_in_local_ddp:
                    model_param.main_grad = None

        # For fp32 grads, we need to reset the grads to main grad.
        if self.params_have_main_grad:
            for model_group in self.fp32_from_fp32_groups:
                for model_param in model_group:
                    model_param.grad = model_param.main_grad

                    # Safe to de-reference model's main_grad after copying.
                    # (If using contiguous buffers, main_grad's memory should
                    # persist and therefore should not be deallocated.)
                    if not self.use_contiguous_buffers_in_local_ddp:
                        model_param.main_grad = None


    def _copy_main_params_to_model_params(self):
        # Only needed for the float16 params.
        model_data, main_data = self._get_model_and_main_params_data_float16()
        _multi_tensor_copy_this_to_that(this=main_data, that=model_data,
                                        overflow_buf=self._dummy_overflow_buf)


    def _copy_model_params_to_main_params(self):
        # Only needed for the float16 params.
        model_data, main_data = self._get_model_and_main_params_data_float16()
        _multi_tensor_copy_this_to_that(this=model_data, that=main_data,
                                        overflow_buf=self._dummy_overflow_buf)


    def state_dict(self):
        state_dict = {}
        state_dict['optimizer'] = self.optimizer.state_dict()
        if self.grad_scaler:
            state_dict['grad_scaler'] = self.grad_scaler.state_dict()
        state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups
        return state_dict


    def load_state_dict(self, state_dict):
        # Optimizer.
        optimizer_key = 'optimizer'
        if optimizer_key not in state_dict:
            optimizer_key = 'optimizer_state_dict'
            print_rank_0('***WARNING*** loading optimizer from '
                         'an old checkpoint ...')
        self.optimizer.load_state_dict(state_dict[optimizer_key])

        # Grad scaler.
        if 'grad_scaler' not in state_dict:
            print_rank_0('***WARNING*** found an old checkpoint, will not '
                         'load grad scaler ...')
        else:
            if self.grad_scaler:
                self.grad_scaler.load_state_dict(state_dict['grad_scaler'])
            else:
                print_rank_0('***WARNING*** found the grad scaler in the '
                             'checkpoint but it is None in the class. '
                             'Skipping loading grad scaler ...')

        # Copy data for the main params.
        fp32_from_float16_params_key = 'fp32_from_fp16_params'
        if fp32_from_float16_params_key not in state_dict:
            fp32_from_float16_params_key = 'fp32_from_fp16'
        for current_group, saved_group in zip(
                self.fp32_from_float16_groups,
                state_dict[fp32_from_float16_params_key]):
            for current_param, saved_param in zip(current_group, saved_group):
                current_param.data.copy_(saved_param.data)


class FP32Optimizer(MegatronOptimizer):

    def __init__(self, optimizer, clip_grad,
                 log_num_zeros_in_grad,
                 params_have_main_grad,
                 use_contiguous_buffers_in_local_ddp,
                 models):

        super(FP32Optimizer, self).__init__(
            optimizer, clip_grad, log_num_zeros_in_grad,
            params_have_main_grad, use_contiguous_buffers_in_local_ddp,
            models)

        self._scale = torch.cuda.FloatTensor([1.0])


    def zero_grad(self, set_to_none=True):
        """Copied from torch.optim.optimizer"""
        for group in self.optimizer.param_groups:
            _zero_grad_group_helper(group['params'], set_to_none)


    def get_loss_scale(self):
        """FP32 optimizer does not do any scaling."""
        return self._scale


    @torch.no_grad()
    def step(self, args, timers):
        """Clip gradients (if needed) and step the base optimizer.
        Always return successful since there is no overflow."""

        # Copy main_grads to grads.
        if self.params_have_main_grad:
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    param.grad = param.main_grad

                    # Safe to de-reference model's main_grad after copying.
                    # (If using contiguous buffers, main_grad's memory should
                    # persist and therefore should not be deallocated.)
                    if not self.use_contiguous_buffers_in_local_ddp:
                        param.main_grad = None

        # Clip gradients.
        grad_norm = None
        if self.clip_grad > 0.0:
            grad_norm = self.clip_grad_norm(self.clip_grad)

        # count the zeros in the grads
        num_zeros_in_grad = self.count_zeros() if \
                            self.log_num_zeros_in_grad else None

        # Update parameters.
        self.optimizer.step()

        # No overflow for FP32 optimizer.
        return True, grad_norm, num_zeros_in_grad


    def reload_model_params(self):
        pass


    def state_dict(self):
        return self.optimizer.state_dict()


    def load_state_dict(self, state_dict):
        self.optimizer.load_state_dict(state_dict)