# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron Module"""

import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter

from megatron import get_args
from megatron import mpu


_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)


def param_is_not_shared(param):
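    """Return True unless the parameter has been flagged as shared (see the
    `shared` attribute set on the duplicated word-embedding weight below)."""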
    return not hasattr(param, 'shared') or not param.shared



class MegatronModule(torch.nn.Module):
    """Megatron specific extensions of torch Module with support
    for pipelining."""

    def __init__(self, share_word_embeddings=True):
        super(MegatronModule, self).__init__()
        self.share_word_embeddings = share_word_embeddings


    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
        """Use this function to override the state dict for
        saving checkpoints."""
        return self.state_dict(destination, prefix, keep_vars)


    def word_embeddings_weight(self):
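        """Return this stage's copy of the word-embedding weight: the
        embedding layer's weight on every stage except the last stage of a
        multi-stage pipeline, which instead returns the duplicated
        `word_embeddings` created by `initialize_word_embeddings()`."""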
        if not mpu.is_pipeline_last_stage(ignore_virtual=True) or \
                mpu.get_pipeline_model_parallel_world_size() == 1:
            return self.language_model.embedding.word_embeddings.weight
        else:
            if not self.share_word_embeddings:
                raise Exception('word_embeddings_weight() called for last '
                                'stage, but share_word_embeddings is false')
            return self.word_embeddings.weight


    def initialize_word_embeddings(self, init_method_normal):
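        """Create and synchronize the duplicated word-embedding weight on the
        last pipeline stage (see the step-by-step comments below)."""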
        args = get_args()
        if not self.share_word_embeddings:
            raise Exception('initialize_word_embeddings() was called but '
                            'share_word_embeddings is false')

        # This function just initializes the word embeddings in the final stage
        # when we are using pipeline parallelism. Nothing to do if we aren't
        # using pipeline parallelism.
        if args.pipeline_model_parallel_size == 1:
            return

        # Parameters are shared between the word embeddings layers and the
        # heads at the end of the model. In a pipelined setup with more than
        # one stage, the initial embedding layer and the head are on different
        # workers, so we do the following:
        # 1. Create a second copy of word_embeddings on the last stage, with
        #    initial parameters of 0.0.
        # 2. Do an all-reduce between the first and last stage to ensure that
        #    the two copies of word_embeddings start off with the same
        #    parameter values.
        # 3. In the training loop, do an all-reduce between the grads of the
        #    two word_embeddings layers to ensure that every applied weight
        #    update is the same on both stages.
        if mpu.is_pipeline_last_stage():
            assert not mpu.is_pipeline_first_stage()
            self._word_embeddings_for_head_key = 'word_embeddings_for_head'
            # set word_embeddings weights to 0 here, then copy first
            # stage's weights using all_reduce below.
            self.word_embeddings = mpu.VocabParallelEmbedding(
                args.padded_vocab_size, args.hidden_size,
                init_method=init_method_normal(args.init_method_std))
            self.word_embeddings.weight.data.fill_(0)
            self.word_embeddings.weight.shared = True
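            # Marking the weight as shared lets helpers such as
            # param_is_not_shared() above recognize this parameter as a
            # duplicate of the first stage's embedding.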

        # Zero out initial weights for decoder embedding.
        # NOTE: We don't currently support T5 with the interleaved schedule.
        if not mpu.is_pipeline_first_stage(ignore_virtual=True) and \
                not mpu.is_pipeline_last_stage(ignore_virtual=True) and \
                mpu.is_rank_in_embedding_group():
            self.language_model.embedding.zero_parameters()

        # Ensure that first and last stages have the same initial parameter
        # values.
        if torch.distributed.is_initialized():
            if mpu.is_rank_in_embedding_group():
                torch.distributed.all_reduce(self.word_embeddings_weight().data,
                                             group=mpu.get_embedding_group())
                # All-reduce other embeddings as well, where necessary. The last
                # stage does not have these other embeddings, so just create
                # placeholder tensors of the right shape filled with zeros.
                # NOTE: We don't currently support T5 with the interleaved schedule.
                if args.pipeline_model_parallel_split_rank is not None:
                    # TODO: Support tokentype embedding.
                    dimensions = (args.max_position_embeddings, args.hidden_size)
                    if mpu.is_pipeline_last_stage(ignore_virtual=True):
                        position_embeddings = torch.nn.Embedding(*dimensions).cuda()
                        position_embeddings.weight.data.fill_(0)
                    else:
                        self.language_model.embedding.cuda()
                        position_embeddings = self.language_model.embedding.position_embeddings
                    torch.distributed.all_reduce(position_embeddings.weight.data,
                                                 group=mpu.get_embedding_group())
        else:
            print("WARNING! Distributed processes aren't initialized, so "
                  "word embeddings in the last layer are not initialized. "
                  "If you are just manipulating a model this is fine, but "
                  "this needs to be handled manually. If you are training "
                  "something is definitely wrong.")


def conversion_helper(val, conversion):
    """Apply conversion to val. Recursively apply conversion if `val`
    #is a nested tuple/list structure."""
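    # For illustration (hypothetical callable `fn` and leaves x, y, z):
    #   conversion_helper((x, [y, z]), fn) -> (fn(x), [fn(y), fn(z)])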
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    rtn = [conversion_helper(v, conversion) for v in val]
    if isinstance(val, tuple):
        rtn = tuple(rtn)
    return rtn


def fp32_to_float16(val, float16_convertor):
    """Convert fp32 `val` to fp16/bf16"""
    def half_conversion(val):
        val_typecheck = val
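        # Unwrap Parameter/Variable objects so the underlying tensor's type is
        # checked; only fp32 tensors are converted, everything else passes
        # through unchanged.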
        if isinstance(val_typecheck, (Parameter, Variable)):
            val_typecheck = val.data
        if isinstance(val_typecheck, _FLOAT_TYPES):
            val = float16_convertor(val)
        return val
    return conversion_helper(val, half_conversion)


def float16_to_fp32(val):
    """Convert fp16/bf16 `val` to fp32"""
    def float_conversion(val):
        val_typecheck = val
        if isinstance(val_typecheck, (Parameter, Variable)):
            val_typecheck = val.data
        if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)):
            val = val.float()
        return val
    return conversion_helper(val, float_conversion)



class Float16Module(MegatronModule):
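    """Wrap a module so it runs in fp16 or bf16: fp32 inputs are cast to the
    half-precision type on the first pipeline stage and outputs are cast back
    to fp32 on the last stage. Typically the full model is wrapped once,
    roughly ``model = Float16Module(model, args)``."""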

    def __init__(self, module, args):
        super(Float16Module, self).__init__()

        if args.fp16:
            self.add_module('module', module.half())
            def float16_convertor(val):
                return val.half()
        elif args.bf16:
            self.add_module('module', module.bfloat16())
            def float16_convertor(val):
                return val.bfloat16()
        else:
            raise Exception('Float16Module requires either args.fp16 or '
                            'args.bf16 to be set')

        self.float16_convertor = float16_convertor


    def set_input_tensor(self, input_tensor):
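        """Forward the input tensor received from the previous pipeline stage
        to the wrapped module."""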
        return self.module.set_input_tensor(input_tensor)


    def forward(self, *inputs, **kwargs):
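        # Only the first pipeline stage receives fp32 inputs that need to be
        # cast down; intermediate stages already receive half-precision
        # activations. Symmetrically, only the last stage casts its outputs
        # back to fp32.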
        if mpu.is_pipeline_first_stage():
            inputs = fp32_to_float16(inputs, self.float16_convertor)
        outputs = self.module(*inputs, **kwargs)
        if mpu.is_pipeline_last_stage():
            outputs = float16_to_fp32(outputs)
        return outputs


    def state_dict(self, destination=None, prefix='', keep_vars=False):
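        """Delegate to the wrapped module so checkpoints are unaffected by the
        fp16/bf16 wrapper."""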
        return self.module.state_dict(destination, prefix, keep_vars)


    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
        return self.module.state_dict_for_save_checkpoint(destination, prefix,
                                                          keep_vars)


    def load_state_dict(self, state_dict, strict=True):
        self.module.load_state_dict(state_dict, strict=strict)