# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""GPT-2 model."""

import torch

from megatron import get_args
from megatron import mpu
from megatron.module import MegatronModule

from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal


def gpt2_attention_mask_func(attention_scores, ltor_mask):
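    """Apply the left-to-right mask in place: positions where `ltor_mask` is
    True are filled with -10000.0 so they contribute (near) zero probability
    after the softmax."""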
    attention_scores.masked_fill_(ltor_mask, -10000.0)
    return attention_scores


def post_language_model_processing(lm_output, labels, logit_weights,
                                   get_key_value, parallel_output,
                                   forward_method_parallel_output,
                                   fp16_lm_cross_entropy):
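    """Project the language model output onto the vocabulary with the shared
    word-embedding weights and, when `labels` are given, return the
    vocab-parallel cross-entropy loss instead of the logits."""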
    if get_key_value:
        lm_output, presents = lm_output

    # Output.
    if forward_method_parallel_output is not None:
        parallel_output = forward_method_parallel_output
    output = parallel_lm_logits(
        lm_output,
        logit_weights,
        parallel_output)

    if get_key_value:
        output = [output, presents]

    if labels is None:
        return output
    else:
        if fp16_lm_cross_entropy:
            assert output.dtype == torch.half
            loss = mpu.vocab_parallel_cross_entropy(output, labels)
        else:
            loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
        return loss


class GPT2ModelBase(MegatronModule):
    """GPT-2 Language model."""

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPT2ModelBase, self).__init__()
        args = get_args()

        self.parallel_output = parallel_output
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy

        self.language_model, self._language_model_key = get_language_model(
            attention_mask_func=gpt2_attention_mask_func,
            num_tokentypes=num_tokentypes,
            add_pooler=False,
            init_method=init_method_normal(args.init_method_std),
            scaled_init_method=scaled_init_method_normal(args.init_method_std,
                                                         args.num_layers))

        # Parameters are shared between the word embeddings layer and the
        # output head at the end of the model. In a pipelined setup with more
        # than one stage, the initial embedding layer and the head live on
        # different workers, so we do the following:
        # 1. Create a second copy of word_embeddings on the last stage, with
        #    initial parameters of 0.0.
        # 2. Do an all-reduce between the first and last stage to ensure that
        #    the two copies of word_embeddings start off with the same
        #    parameter values.
        # 3. In the training loop, all-reduce the grads of the two
        #    word_embeddings layers so that every applied weight update is
        #    the same on both stages.
        if mpu.is_pipeline_last_stage():
            if not mpu.is_pipeline_first_stage():
                self._word_embeddings_for_head_key = 'word_embeddings_for_head'
                # If first and last stages are different, set word_embeddings
                # weights to 0 here, then copy first stage's weights using all_reduce
                # below.
                self.word_embeddings = mpu.VocabParallelEmbedding(
                    args.padded_vocab_size, args.hidden_size,
                    init_method=init_method_normal(args.init_method_std))
                self.word_embeddings.weight.data.fill_(0)
        # Ensure that first and last stages have the same initial parameter values.
        if mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage():
            torch.distributed.all_reduce(self.word_embeddings_weight().data,
                                         group=mpu.get_embedding_group())

    def word_embeddings_weight(self):
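        """Return the embedding weight shared with the output head: the
        language model's word embeddings on the first stage, or the
        duplicated copy created in __init__ on the last stage."""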
        if mpu.is_pipeline_first_stage():
            return self.language_model.embedding.word_embeddings.weight
        if mpu.is_pipeline_last_stage():
            return self.word_embeddings.weight
        raise Exception('word_embeddings_weight() should be '
                        'called for first and last stage only')

    def forward(self, gpt2_model_input, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
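        """Run the stage-local part of the model. On the first pipeline stage
        `gpt2_model_input` is a tuple of (input_ids, position_ids); on later
        stages it is the hidden state received from the previous stage."""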

        kwargs = {'layer_past': layer_past, 'get_key_value': get_key_value}
        if mpu.is_pipeline_first_stage():
            (input_ids, position_ids) = gpt2_model_input
            args = [input_ids, position_ids, attention_mask]
            kwargs['tokentype_ids'] = tokentype_ids
        else:
            args = [gpt2_model_input, attention_mask]
        lm_output = self.language_model(*args, **kwargs)

        if mpu.is_pipeline_last_stage():
            return post_language_model_processing(
                lm_output, labels,
                self.word_embeddings_weight(),
                get_key_value,
                self.parallel_output,
                forward_method_parallel_output,
                self.fp16_lm_cross_entropy)
        else:
            return lm_output

    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
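        """Collect the language model state dict plus, on a last stage that
        is not also the first stage, the duplicated word embeddings used by
        the output head."""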

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                destination, prefix, keep_vars)
        # Save word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Load word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
        if self._language_model_key in state_dict:
            state_dict = state_dict[self._language_model_key]
        self.language_model.load_state_dict(state_dict, strict=strict)


class GPT2Model(GPT2ModelBase):
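    """GPT-2 model when the whole network lives on a single pipeline stage:
    takes token and position ids and returns logits or, when labels are
    provided, the loss."""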

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPT2Model, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, input_ids, position_ids, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPT2Model, self).forward(
            (input_ids, position_ids),
            attention_mask,
            labels=labels,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)


class GPT2ModelFirstStage(GPT2ModelBase):
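    """First pipeline stage: consumes token and position ids and produces
    hidden states for the next stage (no output head, no loss)."""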

    def __init__(self, num_tokentypes=0):
        super(GPT2ModelFirstStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, input_ids, position_ids, attention_mask,
                tokentype_ids=None, layer_past=None, get_key_value=False):
        return super(GPT2ModelFirstStage, self).forward(
            (input_ids, position_ids),
            attention_mask,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPT2ModelIntermediateStage(GPT2ModelBase):
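    """Intermediate pipeline stage: consumes hidden states from the previous
    stage and produces hidden states for the next one."""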

    def __init__(self, num_tokentypes=0):
        super(GPT2ModelIntermediateStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, hidden_state, attention_mask,
                layer_past=None, get_key_value=False):
        return super(GPT2ModelIntermediateStage, self).forward(
            hidden_state,
            attention_mask,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPT2ModelLastStage(GPT2ModelBase):
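    """Last pipeline stage: consumes hidden states and produces logits or,
    when labels are provided, the cross-entropy loss."""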

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPT2ModelLastStage, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, hidden_state, attention_mask, labels=None,
                layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPT2ModelLastStage, self).forward(
            hidden_state,
            attention_mask,
            labels=labels,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)
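

# A minimal usage sketch (illustrative only; it assumes Megatron has been
# initialized with the usual GPT-2 arguments and that `input_ids`,
# `position_ids`, `attention_mask`, and `labels` are tensors prepared by the
# data pipeline -- those names are not defined in this file):
#
#   model = GPT2Model(num_tokentypes=0, parallel_output=True)
#   loss = model(input_ids, position_ids, attention_mask, labels=labels)
#   logits = model(input_ids, position_ids, attention_mask)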