gpt_model.py
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""GPT-2 model."""

import torch

from megatron import get_args
from megatron import mpu
from .module import MegatronModule
from .enums import AttnMaskType
from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal


def gpt_attention_mask_func(attention_scores, ltor_mask):
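    """Apply the causal (left-to-right) mask to the attention scores in
    place by filling masked positions with a large negative value."""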
    attention_scores.masked_fill_(ltor_mask, -10000.0)
    return attention_scores


def post_language_model_processing(lm_output, labels, logit_weights,
                                   get_key_value, parallel_output,
                                   forward_method_parallel_output,
                                   fp16_lm_cross_entropy):
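    """Project the final hidden states onto the vocabulary and, when labels
    are provided, return the vocab-parallel cross entropy loss instead of
    the logits."""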
    if get_key_value:
        lm_output, presents = lm_output

    # Output.
    if forward_method_parallel_output is not None:
        parallel_output = forward_method_parallel_output
    output = parallel_lm_logits(
        lm_output,
        logit_weights,
        parallel_output)

    if get_key_value:
        output = [output, presents]

    if labels is None:
        return output
    else:
        if fp16_lm_cross_entropy:
            assert output.dtype == torch.half
            loss = mpu.vocab_parallel_cross_entropy(output, labels)
        else:
            loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
        return loss


class GPTModelBase(MegatronModule):
    """GPT-2 Language model."""

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelBase, self).__init__()
        args = get_args()

        self.parallel_output = parallel_output
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy

        self.language_model, self._language_model_key = get_language_model(
            attention_mask_func=gpt_attention_mask_func,
            num_tokentypes=num_tokentypes,
            add_pooler=False,
            encoder_attn_mask_type=AttnMaskType.causal,
            init_method=init_method_normal(args.init_method_std),
            scaled_init_method=scaled_init_method_normal(args.init_method_std,
                                                         args.num_layers))

        self.initialize_word_embeddings(init_method_normal)

    def forward(self, gpt_model_input, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
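        """Forward pass for this pipeline stage. On the first stage
        `gpt_model_input` is a (input_ids, position_ids) tuple; on later
        stages it is the hidden state produced by the previous stage. Only
        the last stage applies the output head and, if labels are given,
        the cross entropy loss."""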

        kwargs = {'layer_past': layer_past, 'get_key_value': get_key_value}
        if mpu.is_pipeline_first_stage():
            (input_ids, position_ids) = gpt_model_input
            args = [input_ids, position_ids, attention_mask]
            kwargs['tokentype_ids'] = tokentype_ids
        else:
            args = [gpt_model_input, attention_mask]
        lm_output = self.language_model(*args, **kwargs)

        if mpu.is_pipeline_last_stage():
            return post_language_model_processing(
                lm_output, labels,
                self.word_embeddings_weight(),
                get_key_value,
                self.parallel_output,
                forward_method_parallel_output,
                self.fp16_lm_cross_entropy)
        else:
            return lm_output

    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
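        """Customized state dict used when saving checkpoints."""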

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                destination, prefix, keep_vars)
        # Save word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Load word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
        if self._language_model_key in state_dict:
            state_dict = state_dict[self._language_model_key]
        self.language_model.load_state_dict(state_dict, strict=strict)


class GPTModel(GPTModelBase):
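    """GPT-2 model that serves as both the first and last pipeline stage:
    it consumes (input_ids, position_ids) and returns logits or, when labels
    are given, the loss."""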

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModel, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, input_ids, position_ids, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModel, self).forward(
            (input_ids, position_ids),
            attention_mask,
            labels=labels,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)


class GPTModelFirstStage(GPTModelBase):
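    """First pipeline stage: consumes (input_ids, position_ids) and returns
    hidden states for the next stage."""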

    def __init__(self, num_tokentypes=0):
        super(GPTModelFirstStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, input_ids, position_ids, attention_mask,
                tokentype_ids=None, layer_past=None, get_key_value=False):
        return super(GPTModelFirstStage, self).forward(
            (input_ids, position_ids),
            attention_mask,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelIntermediateStage(GPTModelBase):
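    """Intermediate pipeline stage: consumes and returns hidden states."""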

    def __init__(self, num_tokentypes=0):
        super(GPTModelIntermediateStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, hidden_state, attention_mask,
                layer_past=None, get_key_value=False):
        return super(GPTModelIntermediateStage, self).forward(
            hidden_state,
            attention_mask,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelLastStage(GPTModelBase):
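    """Last pipeline stage: consumes hidden states and returns logits or,
    when labels are given, the loss."""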

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelLastStage, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, hidden_state, attention_mask, labels=None,
                layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModelLastStage, self).forward(
            hidden_state,
            attention_mask,
            labels=labels,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)
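

# Minimal usage sketch (assumes Megatron has been initialized, e.g. via
# initialize_megatron, so that get_args() and the model-parallel state are
# set up, and that `tokens`, `position_ids`, `attention_mask` and `labels`
# are tensors produced by the usual GPT data pipeline):
#
#     model = GPTModel(num_tokentypes=0, parallel_output=True)
#     loss = model(tokens, position_ids, attention_mask, labels=labels)
#     logits = model(tokens, position_ids, attention_mask)  # inference path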