# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""GPT-2 model."""

import torch

from megatron import get_args
from megatron import mpu
from .module import MegatronModule

from .language_model import parallel_lm_logits
from .language_model import get_language_model
from .utils import init_method_normal
from .utils import scaled_init_method_normal


def post_language_model_processing(lm_output, labels, logit_weights,
                                   get_key_value, parallel_output,
                                   forward_method_parallel_output,
                                   fp16_lm_cross_entropy):
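    """Project `lm_output` onto the vocabulary and optionally compute the loss.

    `logit_weights` are the (tied) word-embedding weights used as the output
    projection. If `labels` is None the vocabulary logits are returned
    (together with the key/value cache when `get_key_value` is set); otherwise
    the per-token vocab-parallel cross-entropy loss is returned."""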
    if get_key_value:
        lm_output, presents = lm_output

    # Output.
    if forward_method_parallel_output is not None:
        parallel_output = forward_method_parallel_output
    output = parallel_lm_logits(
        lm_output,
        logit_weights,
        parallel_output)

    if get_key_value:
        output = [output, presents]

    if labels is None:
        return output
    else:
        if fp16_lm_cross_entropy:
            assert output.dtype == torch.half
            loss = mpu.vocab_parallel_cross_entropy(output, labels)
        else:
            loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
        return loss


class GPTModelBase(MegatronModule):
    """GPT-2 Language model."""

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelBase, self).__init__()
        args = get_args()

        self.parallel_output = parallel_output
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy

        self.language_model, self._language_model_key = get_language_model(
            num_tokentypes=num_tokentypes,
            add_pooler=False,
            init_method=init_method_normal(args.init_method_std),
            scaled_init_method=scaled_init_method_normal(args.init_method_std,
                                                         args.num_layers))

        # Set up the word embeddings whose weights the last pipeline stage
        # reuses as the output projection (tied input/output embeddings).
        self.initialize_word_embeddings(init_method_normal)

    def forward(self, gpt_model_input, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
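        """Run this pipeline stage of the language model.

        On the first stage `gpt_model_input` is the tuple
        (input_ids, position_ids); on later stages it is the hidden state
        received from the previous stage. Only the last stage projects the
        output onto the vocabulary and, if labels are given, computes the
        loss; other stages return the hidden state for the next stage."""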

        kwargs = {'layer_past': layer_past, 'get_key_value': get_key_value}
        if mpu.is_pipeline_first_stage():
            (input_ids, position_ids) = gpt_model_input
            args = [input_ids, position_ids, attention_mask]
            kwargs['tokentype_ids'] = tokentype_ids
        else:
            args = [gpt_model_input, attention_mask]
        lm_output = self.language_model(*args, **kwargs)

        if mpu.is_pipeline_last_stage():
            return post_language_model_processing(
                lm_output, labels,
                self.word_embeddings_weight(),
                get_key_value,
                self.parallel_output,
                forward_method_parallel_output,
                self.fp16_lm_cross_entropy)
        else:
            return lm_output

    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                destination, prefix, keep_vars)
        # Save word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Load word_embeddings.
        if mpu.is_pipeline_last_stage() and not mpu.is_pipeline_first_stage():
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
        if self._language_model_key in state_dict:
            state_dict = state_dict[self._language_model_key]
        self.language_model.load_state_dict(state_dict, strict=strict)


class GPTModel(GPTModelBase):
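    """GPT-2 model holding both the embedding (first-stage) and output
    (last-stage) paths; used when the model is not split across
    pipeline stages."""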

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModel, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, input_ids, position_ids, attention_mask, labels=None,
                tokentype_ids=None, layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModel, self).forward(
            (input_ids, position_ids),
            attention_mask,
            labels=labels,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)


class GPTModelFirstStage(GPTModelBase):
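    """First pipeline stage: embeds the token and position ids and returns
    the hidden state for the next stage."""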

    def __init__(self, num_tokentypes=0):
        super(GPTModelFirstStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, input_ids, position_ids, attention_mask,
                tokentype_ids=None, layer_past=None, get_key_value=False):
        return super(GPTModelFirstStage, self).forward(
            (input_ids, position_ids),
            attention_mask,
            tokentype_ids=tokentype_ids,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelIntermediateStage(GPTModelBase):
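    """Intermediate pipeline stage: consumes the hidden state from the
    previous stage and returns the hidden state for the next one."""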

    def __init__(self, num_tokentypes=0):
        super(GPTModelIntermediateStage, self).__init__(
            num_tokentypes=num_tokentypes)

    def forward(self, hidden_state, attention_mask,
                layer_past=None, get_key_value=False):
        return super(GPTModelIntermediateStage, self).forward(
            hidden_state,
            attention_mask,
            layer_past=layer_past,
            get_key_value=get_key_value)


class GPTModelLastStage(GPTModelBase):
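    """Last pipeline stage: consumes the hidden state and returns the logits,
    or the cross-entropy loss when labels are provided."""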

    def __init__(self, num_tokentypes=0, parallel_output=True):
        super(GPTModelLastStage, self).__init__(
            num_tokentypes=num_tokentypes,
            parallel_output=parallel_output)

    def forward(self, hidden_state, attention_mask, labels=None,
                layer_past=None, get_key_value=False,
                forward_method_parallel_output=None):
        return super(GPTModelLastStage, self).forward(
            hidden_state,
            attention_mask,
            labels=labels,
            layer_past=layer_past,
            get_key_value=get_key_value,
            forward_method_parallel_output=forward_method_parallel_output)
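

# A minimal usage sketch of the non-pipelined model, assuming Megatron's
# global arguments and model-parallel state have already been initialized
# elsewhere; the tensor names and shapes below are illustrative assumptions,
# not definitions from this module:
#
#     model = GPTModel(num_tokentypes=0, parallel_output=True)
#
#     # tokens, position_ids, labels: [batch, seq_len] integer tensors;
#     # attention_mask: causal mask in the layout the transformer layers expect.
#     per_token_loss = model(tokens, position_ids, attention_mask, labels=labels)
#
#     # Inference with incremental decoding: also return the key/value cache.
#     logits, presents = model(tokens, position_ids, attention_mask,
#                              get_key_value=True)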