# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""BERT model."""

import torch

from megatron import get_args
from megatron import mpu
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model import LayerNorm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule

def bert_extended_attention_mask(attention_mask):
    # We create a 3D attention mask from a 2D tensor mask.
    # [b, 1, s]
    attention_mask_b1s = attention_mask.unsqueeze(1)
    # [b, s, 1]
    attention_mask_bs1 = attention_mask.unsqueeze(2)
    # [b, s, s]
    attention_mask_bss = attention_mask_b1s * attention_mask_bs1
    # [b, 1, s, s]
    extended_attention_mask = attention_mask_bss.unsqueeze(1)

    # Convert the mask to boolean: True marks positions to be masked out.
    extended_attention_mask = (extended_attention_mask < 0.5)

    return extended_attention_mask
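
# Illustrative example (shapes only), assuming the usual convention that 1
# marks real tokens and 0 marks padding:
#   mask = torch.tensor([[1, 1, 0]])
#   bert_extended_attention_mask(mask)  # bool tensor of shape [1, 1, 3, 3],
#                                       # True wherever attention is blocked,
#                                       # i.e. whenever query or key is padding.
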

def bert_position_ids(token_ids):
    # Create position ids
    seq_length = token_ids.size(1)
    position_ids = torch.arange(seq_length, dtype=torch.long,
                                device=token_ids.device)
    position_ids = position_ids.unsqueeze(0).expand_as(token_ids)

    return position_ids
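
# Illustrative example: for token_ids of shape [2, 4] this returns
#   tensor([[0, 1, 2, 3],
#           [0, 1, 2, 3]])
# i.e. absolute position ids broadcast across the batch dimension.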


class BertLMHead(MegatronModule):
    """Masked LM head for Bert

    Arguments:
        mpu_vocab_size: model parallel size of vocabulary.
        hidden_size: hidden size
        init_method: init method for weight initialization
        layernorm_epsilon: tolerance for layer norm divisions
53
        parallel_output: whether output logits being distributed or not.
54
    """

    def __init__(self, mpu_vocab_size, hidden_size, init_method,
                 layernorm_epsilon, parallel_output):

        super(BertLMHead, self).__init__()

        args = get_args()

        self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
        mpu.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
        self.parallel_output = parallel_output

        self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
        setattr(self.dense.weight, 'sequence_parallel', args.sequence_parallel)
        setattr(self.dense.bias, 'sequence_parallel', args.sequence_parallel)

        self.layernorm = LayerNorm(hidden_size,
                                   eps=layernorm_epsilon,
                                   sequence_parallel=args.sequence_parallel)
        self.gelu = torch.nn.functional.gelu
        if args.openai_gelu:
            self.gelu = openai_gelu
        elif args.onnx_safe:
            self.gelu = erf_gelu

    def forward(self, hidden_states, word_embeddings_weight):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.gelu(hidden_states)
        hidden_states = self.layernorm(hidden_states)
        output = parallel_lm_logits(hidden_states,
                                    word_embeddings_weight,
                                    self.parallel_output,
                                    bias=self.bias)
        return output
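
# A brief note on shapes through BertLMHead.forward, following the
# transformer's [s, b, h] layout: dense, gelu and layernorm keep [s, b, h];
# parallel_lm_logits then projects onto the vocabulary with the tied
# word-embedding weights, giving [s, b, vocab] (or only the local vocab
# partition when parallel_output=True).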


def post_language_model_processing(lm_output, pooled_output,
                                   lm_head, binary_head,
                                   lm_labels,
                                   logit_weights,
                                   fp16_lm_cross_entropy):
    # Output.
    lm_logits = lm_head(
        lm_output, logit_weights)

    binary_logits = None
    if binary_head is not None:
        binary_logits = binary_head(pooled_output)

    if lm_labels is None:
        # [s b h] => [b s h]
        return lm_logits.transpose(0,1).contiguous(), binary_logits
    else:
        # [b s] => [s b]
        lm_labels = lm_labels.transpose(0,1).contiguous()
        # lm_logits : [s, b, h] and lm_labels: [s, b]
        if fp16_lm_cross_entropy:
            assert lm_logits.dtype == torch.half
            lm_loss = mpu.vocab_parallel_cross_entropy(lm_logits, lm_labels)
        else:
            lm_loss = mpu.vocab_parallel_cross_entropy(lm_logits.float(),
                                                       lm_labels)
        # [s, b] => [b s]
        lm_loss = lm_loss.transpose(0,1).contiguous()
        return lm_loss, binary_logits
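
# In short: with lm_labels=None this returns (lm_logits [b, s, vocab], binary_logits);
# with labels it returns (per-token LM loss [b, s], binary_logits). The vocab
# dimension may be a model-parallel partition, and binary_logits is [b, 2] from
# the next-sentence-prediction head, or None when no binary head is attached.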


class BertModel(MegatronModule):
    """Bert Language model."""

    def __init__(self,
                 num_tokentypes=2,
                 add_binary_head=True,
                 parallel_output=True,
                 pre_process=True,
                 post_process=True):
        super(BertModel, self).__init__()
        args = get_args()

        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
        self.add_binary_head = add_binary_head
        self.parallel_output = parallel_output
        self.pre_process = pre_process
        self.post_process = post_process

        init_method = init_method_normal(args.init_method_std)
        scaled_init_method = scaled_init_method_normal(args.init_method_std,
                                                       args.num_layers)

        self.language_model, self._language_model_key = get_language_model(
            num_tokentypes=num_tokentypes,
            add_pooler=self.add_binary_head,
            encoder_attn_mask_type=AttnMaskType.padding,
            init_method=init_method,
            scaled_init_method=scaled_init_method,
            pre_process=self.pre_process,
            post_process=self.post_process)

        self.initialize_word_embeddings(init_method_normal)
        if self.post_process:
            self.lm_head = BertLMHead(
                self.word_embeddings_weight().size(0),
                args.hidden_size, init_method, args.layernorm_epsilon, parallel_output)
            self._lm_head_key = 'lm_head'
            self.binary_head = None
            if self.add_binary_head:
                self.binary_head = get_linear_layer(args.hidden_size, 2,
                                                    init_method)
                self._binary_head_key = 'binary_head'

    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        self.language_model.set_input_tensor(input_tensor)

    def forward(self, bert_model_input, attention_mask,
                tokentype_ids=None, lm_labels=None):

        extended_attention_mask = bert_extended_attention_mask(attention_mask)
        input_ids = bert_model_input
        position_ids = bert_position_ids(input_ids)

        lm_output = self.language_model(
            input_ids,
            position_ids,
            extended_attention_mask,
            tokentype_ids=tokentype_ids
        )

        if self.post_process and self.add_binary_head:
            lm_output, pooled_output = lm_output
        else:
            pooled_output = None

        if self.post_process:
            return post_language_model_processing(lm_output, pooled_output,
                                                  self.lm_head, self.binary_head,
                                                  lm_labels,
                                                  self.word_embeddings_weight(),
                                                  self.fp16_lm_cross_entropy)
        else:
            return lm_output


    def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
        """For easy load when model is combined with other heads,
        add an extra key."""

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
                                                                 keep_vars=keep_vars)
        if self.post_process:
            state_dict_[self._lm_head_key] \
                = self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
                                                              keep_vars=keep_vars)
        if self.post_process and self.add_binary_head:
            state_dict_[self._binary_head_key] \
                = self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars)
        # Save word_embeddings.
        if self.post_process and not self.pre_process:
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        self.language_model.load_state_dict(
            state_dict[self._language_model_key], strict=strict)
        if self.post_process:
            self.lm_head.load_state_dict(
                state_dict[self._lm_head_key], strict=strict)
        if self.post_process and self.add_binary_head:
            self.binary_head.load_state_dict(
                state_dict[self._binary_head_key], strict=strict)
        # Load word_embeddings.
        if self.post_process and not self.pre_process:
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
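

# Rough usage sketch (illustrative; the canonical training entry point is the
# repository's pretrain_bert.py, and megatron must be initialized before
# get_args() inside BertModel will work):
#
#   def model_provider(pre_process=True, post_process=True):
#       return BertModel(num_tokentypes=2,
#                        add_binary_head=True,
#                        parallel_output=True,
#                        pre_process=pre_process,
#                        post_process=post_process)
#
# The forward pass then takes input_ids [b, s], attention_mask [b, s] and,
# optionally, tokentype_ids and lm_labels of the same shape, returning either
# logits or the per-token LM loss as described above.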