# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""GPT-2 model."""

import torch

from megatron import get_args
from megatron import mpu
from .module import MegatronModule

from .enums import AttnMaskType
from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal


def post_language_model_processing(lm_output, labels, logit_weights,
                                   get_key_value, parallel_output,
                                   forward_method_parallel_output,
                                   fp16_lm_cross_entropy):
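    """Turn the final hidden states into logits and, optionally, a loss.

    Projects ``lm_output`` onto the vocabulary with the provided (tied)
    embedding weights, threads the cached key/value ``presents`` through
    when ``get_key_value`` is set, and returns the vocab-parallel
    cross-entropy loss instead of the logits when ``labels`` are given.
    """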
    if get_key_value:
        lm_output, presents = lm_output

    # Output.
    if forward_method_parallel_output is not None:
        parallel_output = forward_method_parallel_output
    output = parallel_lm_logits(
        lm_output,
        logit_weights,
        parallel_output)

    if get_key_value:
        output = [output, presents]

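    # Without labels, return the logits; with labels, return the
    # vocab-parallel cross-entropy loss (computed directly on fp16 logits
    # when fp16_lm_cross_entropy is set, otherwise on fp32 logits).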
    if labels is None:
        return output
    else:
        if fp16_lm_cross_entropy:
            assert output.dtype == torch.half
            loss = mpu.vocab_parallel_cross_entropy(output, labels)
        else:
            loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
        return loss


class GPTModelBase(MegatronModule):
    """GPT-2 Language model."""

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelBase, self).__init__()
        args = get_args()

        self.parallel_output = parallel_output
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy

        self.language_model, self._language_model_key = get_language_model(
            num_tokentypes=num_tokentypes,
            add_pooler=False,
            encoder_attn_mask_type=AttnMaskType.causal,
            init_method=init_method_normal(args.init_method_std),
            scaled_init_method=scaled_init_method_normal(args.init_method_std,
                                                         args.num_layers))

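        # Word embeddings used by the output projection; with pipeline
        # parallelism, MegatronModule keeps these tied to the first
        # stage's input embeddings.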
        self.initialize_word_embeddings(init_method_normal)

    def forward(self, gpt_model_input, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
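        """Run this pipeline stage.

        On the first stage ``gpt_model_input`` is the ``(input_ids,
        position_ids)`` tuple; on later stages it is the hidden state
        received from the previous stage. Only the last stage converts
        the language-model output into logits (or a loss when ``labels``
        are given); other stages return the hidden state as-is.
        """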

        kwargs = {'layer_past': layer_past, 'get_key_value': get_key_value}
        if mpu.is_pipeline_first_stage():
            (input_ids, position_ids) = gpt_model_input
            args = [input_ids, position_ids, attention_mask]
            kwargs['tokentype_ids'] = tokentype_ids
        else:
            args = [gpt_model_input, attention_mask]
        lm_output = self.language_model(*args, **kwargs)

        if mpu.is_pipeline_last_stage():
            return post_language_model_processing(
                lm_output, labels,
                self.word_embeddings_weight(),
                get_key_value,
                self.parallel_output,
                forward_method_parallel_output,
                self.fp16_lm_cross_entropy)
        else:
            return lm_output

    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
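        """Collect a state dict for checkpointing.

        The output-layer word embeddings are saved separately on the last
        pipeline stage, unless that stage is also the first one (in which
        case they already live inside the language model).
        """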

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                destination, prefix, keep_vars)
        # Save word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Load word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
        if self._language_model_key in state_dict:
            state_dict = state_dict[self._language_model_key]
        self.language_model.load_state_dict(state_dict, strict=strict)


class GPTModel(GPTModelBase):
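    """GPT-2 model exposing the full ``(input_ids, position_ids)`` interface.

    Meant for the case where a single pipeline stage holds the whole model;
    it simply packs the ids into the input expected by
    ``GPTModelBase.forward``.
    """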

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModel, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, input_ids, position_ids, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModel, self).forward(
            (input_ids, position_ids),
            attention_mask,
            labels=labels,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)


class GPTModelFirstStage(GPTModelBase):
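    """First pipeline stage: consumes token and position ids and returns
    the hidden state that is sent on to the next stage."""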

    def __init__(self, num_tokentypes=0):
        super(GPTModelFirstStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, input_ids, position_ids, attention_mask,
                tokentype_ids=None, layer_past=None, get_key_value=False):
        return super(GPTModelFirstStage, self).forward(
            (input_ids, position_ids),
            attention_mask,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelIntermediateStage(GPTModelBase):
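    """Intermediate pipeline stage: maps the hidden state received from
    the previous stage to the hidden state for the next one."""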

    def __init__(self, num_tokentypes=0):
        super(GPTModelIntermediateStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, hidden_state, attention_mask,
                layer_past=None, get_key_value=False):
        return super(GPTModelIntermediateStage, self).forward(
            hidden_state,
            attention_mask,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelLastStage(GPTModelBase):
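    """Last pipeline stage: turns the incoming hidden state into logits,
    or into the cross-entropy loss when labels are provided."""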

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelLastStage, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, hidden_state, attention_mask, labels=None,
                layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModelLastStage, self).forward(
            hidden_state,
            attention_mask,
            labels=labels,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)
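

# Minimal usage sketch, assuming megatron has already been initialized (for
# example with megatron.initialize.initialize_megatron) so that get_args()
# and the mpu process groups are set up:
#
#     model = GPTModel(num_tokentypes=0, parallel_output=True).cuda()
#     logits = model(input_ids, position_ids, attention_mask)
#     losses = model(input_ids, position_ids, attention_mask, labels=labels)
#
# vocab_parallel_cross_entropy returns per-token losses, so `losses` has the
# same shape as `labels` and still needs to be masked and averaged by the
# caller.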