# Copyright (c) 2023, Tri Dao.

import logging
import math
import re
from functools import partial

from collections import namedtuple, OrderedDict
from collections.abc import Sequence

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import GPT2Config

from einops import rearrange

from flash_attn.ops.activations import sqrelu_fwd
from flash_attn.modules.mha import MHA, ParallelMHA
from flash_attn.modules.mlp import Mlp, GatedMlp, FusedMLP, ParallelFusedMLP
from flash_attn.modules.block import Block, ParallelBlock
from flash_attn.modules.embedding import GPT2Embeddings, ParallelGPT2Embeddings
from flash_attn.utils.distributed import sync_shared_params, all_gather_raw
from flash_attn.utils.pretrained import state_dict_from_pretrained
from flash_attn.utils.generation import GenerationMixin
from flash_attn.models.opt import remap_state_dict_hf_opt
from flash_attn.models.gptj import remap_state_dict_hf_gptj
from flash_attn.models.gpt_neox import remap_state_dict_hf_gpt_neox

try:
    from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
    ColumnParallelLinear = None

try:
    from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
    dropout_add_layer_norm = None

try:
    from flash_attn.ops.layer_norm import dropout_add_layer_norm_parallel_residual
except ImportError:
    dropout_add_layer_norm_parallel_residual = None

try:
    from flash_attn.ops.triton.mlp import FusedDenseSqreluDense
except ImportError:
    FusedDenseSqreluDense = None


logger = logging.getLogger(__name__)


def create_mixer_cls(config, layer_idx=None, process_group=None, device=None, dtype=None):
    factory_kwargs = {'device': device, 'dtype': dtype}
    head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
    softmax_scale = 1.0 if not config.scale_attn_weights else head_dim ** (-0.5)
    if config.scale_attn_by_inverse_layer_idx:
        assert layer_idx is not None
        softmax_scale /= float(layer_idx + 1)
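        # E.g. with head_dim=64 and layer_idx=3, softmax_scale = 64 ** (-0.5) / 4.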
    dwconv = getattr(config, 'attn_dwconv', False)
    if dwconv:
        assert process_group is None, 'TensorParallel MHA does not support dwconv yet'
    qkv_proj_bias = getattr(config, 'qkv_proj_bias', True)
    out_proj_bias = getattr(config, 'out_proj_bias', True)
    rotary_emb_dim = int(getattr(config, 'rotary_emb_fraction', 0.0) * head_dim)
    rotary_emb_scale_base = getattr(config, 'rotary_emb_scale_base', None)
    rotary_emb_interleaved = getattr(config, 'rotary_emb_interleaved', False)
    use_flash_attn = getattr(config, 'use_flash_attn', False)
    fused_bias_fc = getattr(config, 'fused_bias_fc', False)
    if not fused_bias_fc:
        assert process_group is None, 'TensorParallel MHA requires fused_bias_fc'
    mha_cls = MHA if process_group is None else ParallelMHA
    serial_kwargs = ({'fused_bias_fc': fused_bias_fc, 'dwconv': dwconv}
                     if process_group is None else {})
    parallel_kwargs = ({'process_group': process_group,
                        'sequence_parallel': getattr(config, 'sequence_parallel', True)}
                       if process_group is not None else {})
    mixer_cls = partial(mha_cls, num_heads=config.num_attention_heads,
                        qkv_proj_bias=qkv_proj_bias, out_proj_bias=out_proj_bias,
                        dropout=config.attn_pdrop,
                        softmax_scale=softmax_scale, causal=True, layer_idx=layer_idx,
                        rotary_emb_dim=rotary_emb_dim, rotary_emb_scale_base=rotary_emb_scale_base,
                        rotary_emb_interleaved=rotary_emb_interleaved,
                        use_flash_attn=use_flash_attn,
                        **serial_kwargs, **parallel_kwargs, **factory_kwargs)
    return mixer_cls


def create_mlp_cls(config, layer_idx=None, process_group=None, device=None, dtype=None):
    factory_kwargs = {'device': device, 'dtype': dtype}
    fused_mlp = getattr(config, 'fused_mlp', False)
    if fused_mlp:
        assert config.activation_function in ['gelu_new', 'gelu_fast', 'gelu_approx', 'relu', 'sqrelu']
    fused_dense_sqrelu_dense = getattr(config, 'fused_dense_sqrelu_dense', False)
    if fused_dense_sqrelu_dense:
        assert config.activation_function == 'sqrelu', ('fused_dense_sqrelu_dense only '
                                                        'supports activation_function sqrelu')
    assert not (fused_dense_sqrelu_dense and fused_mlp)
    if process_group is not None:
        assert fused_mlp, 'Tensor Parallel is only implemented for FusedMLP'
    if not fused_mlp and not fused_dense_sqrelu_dense:
        assert config.activation_function in ['gelu_new', 'gelu_fast', 'gelu_approx', 'relu',
                                              'sqrelu', 'glu', 'swiglu', 'geglu']
        if config.activation_function in ['glu', 'swiglu', 'geglu']:
            activation = (F.sigmoid if config.activation_function == 'glu'
                          else (F.silu if config.activation_function == 'swiglu'
                                else F.gelu))
            mlp_cls = partial(GatedMlp, hidden_features=config.n_inner, activation=activation,
                              **factory_kwargs)
        else:
            if config.activation_function == 'relu':
                activation = partial(F.relu, inplace=True)
            elif config.activation_function == 'sqrelu':
                activation = sqrelu_fwd
            else:
                approximate = ('tanh' if config.activation_function
                               in ['gelu_new', 'gelu_fast', 'gelu_approx'] else 'none')
                activation = partial(F.gelu, approximate=approximate)
            mlp_cls = partial(Mlp, hidden_features=config.n_inner, activation=activation,
                              **factory_kwargs)
    else:
        mlp_checkpoint_lvl = getattr(config, 'mlp_checkpoint_lvl', 0)
        # mlp_checkpoint_lvl could be a list, which contains the checkpoint_lvl for each layer
        if isinstance(mlp_checkpoint_lvl, Sequence):
            assert layer_idx is not None
            mlp_checkpoint_lvl = mlp_checkpoint_lvl[layer_idx]
        if fused_mlp:
            if FusedMLP is None:
                raise ImportError('fused_dense is not installed')
            activation = ('gelu_approx' if config.activation_function
                          in ['gelu_new', 'gelu_fast', 'gelu_approx'] else config.activation_function)
            mlp_cls = FusedMLP if process_group is None else ParallelFusedMLP
            parallel_kwargs = ({'process_group': process_group,
                                'sequence_parallel': getattr(config, 'sequence_parallel', True)}
                               if process_group is not None else {})
            mlp_cls = partial(mlp_cls, hidden_features=config.n_inner, activation=activation,
                              checkpoint_lvl=mlp_checkpoint_lvl,
                              **parallel_kwargs, **factory_kwargs)
        elif fused_dense_sqrelu_dense:
            assert FusedDenseSqreluDense is not None
            mlp_cls = partial(FusedDenseSqreluDense, hidden_features=config.n_inner,
                              checkpoint_lvl=mlp_checkpoint_lvl, **factory_kwargs)
        else:
            raise RuntimeError('MLP type not supported')
    return mlp_cls


def create_block(config, layer_idx=None, process_group=None, device=None, dtype=None):
    factory_kwargs = {'device': device, 'dtype': dtype}
    sequence_parallel = getattr(config, 'sequence_parallel', True)
    mixer_cls = create_mixer_cls(config, layer_idx, process_group=process_group, **factory_kwargs)
    mlp_cls = create_mlp_cls(config, layer_idx, process_group=process_group, **factory_kwargs)
    norm_cls = partial(nn.LayerNorm, eps=config.layer_norm_epsilon, **factory_kwargs)
    # TD [2022-07-30]: Force residual in fp32, seems to make fp16 training more stable
    residual_in_fp32 = getattr(config, 'residual_in_fp32', False)
    resid_dropout1 = config.resid_pdrop if layer_idx is None or layer_idx > 0 else config.embd_pdrop
    prenorm = getattr(config, 'prenorm', True)
    parallel_block = getattr(config, 'parallel_block', False)
    if not parallel_block:
        block = Block(
            config.hidden_size, mixer_cls, mlp_cls, norm_cls=norm_cls,
            prenorm=prenorm, resid_dropout1=resid_dropout1, resid_dropout2=config.resid_pdrop,
            fused_dropout_add_ln=getattr(config, 'fused_dropout_add_ln', False),
            residual_in_fp32=residual_in_fp32,
            sequence_parallel=sequence_parallel and process_group is not None,
            mark_shared_params=process_group is not None
        )
    else:
        assert prenorm
        block = ParallelBlock(
            config.hidden_size, mixer_cls, mlp_cls, norm_cls=norm_cls,
            resid_dropout1=resid_dropout1, resid_dropout2=config.resid_pdrop,
            tied_norm=getattr(config, 'parallel_block_tied_norm', False),
            fused_dropout_add_ln=getattr(config, 'fused_dropout_add_ln', False),
            residual_in_fp32=residual_in_fp32,
            sequence_parallel=sequence_parallel and process_group is not None,
            mark_shared_params=process_group is not None
        )
    block.layer_idx = layer_idx
    return block


class GPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, GPT2Config):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    @classmethod
    def from_pretrained(cls, model_name, config, *args, strict=True, device=None, dtype=None,
                        world_size=1, rank=0, **kwargs):
        """
        Instantiate a GPTPreTrainedModel from a pre-trained model file or a PyTorch state dict.
        Download and cache the pre-trained model file if needed.
        """
        # Instantiate model.
        model = cls(config, *args, device=device, dtype=dtype, **kwargs)
        # Load state_dict on CPU because we already initialized the model on GPU, and we don't
        # want extra stuff taking up more GPU memory
        state_dict = state_dict_from_pretrained(
            model_name, device='cpu', dtype=dtype
        )
        if model_name.startswith('gpt2'):
            state_dict = remap_state_dict_hf_gpt2(state_dict, config)
        elif model_name.startswith('facebook/opt'):
            state_dict = remap_state_dict_hf_opt(state_dict, config)
        elif model_name.startswith('EleutherAI/gpt-j-'):
            state_dict = remap_state_dict_hf_gptj(state_dict, config)
            strict = False  # We have rotary_emb.inv_freq buffers not in the GPT-J checkpoint
        elif model_name.startswith('EleutherAI/gpt-neox-'):
            state_dict = remap_state_dict_hf_gpt_neox(state_dict, config)
        else:
            raise NotImplementedError(f'Model {model_name} not supported')
        if world_size > 1:
            state_dict = shard_state_dict_tp(state_dict, config, world_size, rank)
        load_return = model.load_state_dict(state_dict, strict=strict)
        logger.info(load_return)
        return model
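
    # A minimal usage sketch (assuming the Hugging Face 'gpt2' checkpoint, remapped above):
    #     config = GPT2Config.from_pretrained('gpt2')
    #     model = GPTLMHeadModel.from_pretrained('gpt2', config, device='cuda',
    #                                            dtype=torch.float16)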


# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True):
    if isinstance(module, nn.Linear):
        nn.init.normal_(module.weight, std=initializer_range)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Embedding):
        nn.init.normal_(module.weight, std=initializer_range)

    if rescale_prenorm_residual:
        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
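        # For example, with n_layer=12 and initializer_range=0.02, these residual-path weights
        # get std = 0.02 / math.sqrt(2 * 12) ≈ 0.0041.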
        for name, p in module.named_parameters():
            if name in ["out_proj.weight", "fc2.weight"]:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))


class GPTModel(GPTPreTrainedModel):

    def __init__(self, config: GPT2Config, process_group=None, device=None, dtype=None):
        super().__init__(config)
        factory_kwargs = {'device': device, 'dtype': dtype}
        self.process_group = process_group
        self.sequence_parallel = getattr(config, 'sequence_parallel', True)
        assert config.activation_function in ['gelu', 'gelu_new', 'gelu_fast', 'gelu_approx',
                                              'relu', 'sqrelu', 'glu', 'swiglu', 'geglu']
        pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
        vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple)
                      * pad_vocab_size_multiple)
        # TD [2022-07-30]: Force residual in fp32, seems to make fp16 training more stable
        self.residual_in_fp32 = getattr(config, 'residual_in_fp32', False)
        # These 2 options are for OPT-350m
        self.prenorm = getattr(config, 'prenorm', True)
        word_embed_proj_dim = getattr(config, 'word_embed_proj_dim', None)
        # For GPT-J, GPT-NeoX
        self.parallel_block = getattr(config, 'parallel_block', False)

        if process_group is None:
            self.embeddings = GPT2Embeddings(
                config.hidden_size, vocab_size, config.max_position_embeddings,
                word_embed_proj_dim=word_embed_proj_dim, **factory_kwargs
            )
        else:
            self.embeddings = ParallelGPT2Embeddings(
                config.hidden_size, vocab_size, config.max_position_embeddings,
                process_group=process_group, sequence_parallel=self.sequence_parallel,
                **factory_kwargs
            )

        # We change the order of dropout, residual and layer norm:
        # Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
        # Dropout -> Add -> LN -> Attn / MLP, returning both the residual branch (output of Add)
        # and the main branch (output of MLP). The model definition is unchanged, but the mapping
        # of the nn.Dropout probabilities is changed.
        # This is for performance reasons: we can fuse dropout + add + layer_norm.
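        # A rough sketch of the reordering (hypothetical `attn` callable, not the actual
        # Block API): a standard prenorm block computes
        #     hidden = hidden + dropout(attn(ln1(hidden)))
        # whereas here each Block takes (hidden_states, residual) and computes
        #     residual = dropout(hidden_states) + residual
        #     hidden_states = attn(ln1(residual))
        # i.e. the same operations regrouped so dropout + add + layer_norm can be fused.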
        self.layers = nn.ModuleList([create_block(config, layer_idx=i, process_group=process_group,
                                                  **factory_kwargs)
                                     for i in range(config.num_hidden_layers)])

        self.fused_dropout_add_ln = getattr(config, 'fused_dropout_add_ln', False)
        if self.fused_dropout_add_ln:
            if ((not self.parallel_block and dropout_add_layer_norm is None)
                    or (self.parallel_block and dropout_add_layer_norm_parallel_residual is None)):
                raise ImportError('dropout_layer_norm is not installed')
        if self.prenorm:
            self.drop_f = nn.Dropout(config.resid_pdrop)
            self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon,
                                     **factory_kwargs)
        if process_group is not None:
            for p in self.ln_f.parameters():
                # Mark the norm parameters as "shared_params" so that we sync their values at init.
                p._shared_params = True
                # Mark the norm params as "sequence_parallel" so we run all-reduce on their grads.
                if self.sequence_parallel:
                    p._sequence_parallel = True

        self.apply(partial(_init_weights, n_layer=config.num_hidden_layers,
                           initializer_range=config.initializer_range))
        self.tie_weights()

    def tie_weights(self):
        if self.process_group is not None:
            sync_shared_params(self, self.process_group)

    def forward(self, input_ids, position_ids=None, inference_params=None):
        # If using Tensor Parallel with sequence parallel, we combine the batch and the seqlen
        # dimensions so that we can split on it easily, in case of small batch size.
        # Only the attention layers need to know the seqlen.
        embedding_kwargs = ({'combine_batch_seqlen_dim': True}
                            if self.process_group is not None and self.sequence_parallel else {})
        hidden_states = self.embeddings(input_ids, position_ids=position_ids, **embedding_kwargs)
        if self.parallel_block:
            hidden_states2 = None
        residual = None
        mixer_kwargs = ({'seqlen': input_ids.shape[1]}
                        if self.process_group is not None and self.sequence_parallel else {})
        if inference_params is not None:
            mixer_kwargs['inference_params'] = inference_params
        for layer in self.layers:
            if self.prenorm:
                if not self.parallel_block:
                    hidden_states, residual = layer(hidden_states, residual,
                                                    mixer_kwargs=mixer_kwargs)
                else:
                    hidden_states, hidden_states2, residual = layer(
                        hidden_states, hidden_states2, residual, mixer_kwargs=mixer_kwargs
                    )
            else:
                hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
        if self.prenorm:
            if not self.fused_dropout_add_ln:
                dropped = self.drop_f(hidden_states)
                if not self.parallel_block:
                    residual = (dropped + residual) if residual is not None else dropped
                else:
                    dropped2 = self.drop_f(hidden_states2)
                    residual = ((residual + dropped + dropped2)
                                if residual is not None else dropped + dropped2)
                hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
            else:
                # Set prenorm=False here since we don't need the residual
                if not self.parallel_block:
                    hidden_states = dropout_add_layer_norm(
                        hidden_states, residual, self.ln_f.weight, self.ln_f.bias,
                        self.drop_f.p if self.training else 0.0, self.ln_f.eps, prenorm=False,
                        residual_in_fp32=self.residual_in_fp32
                    )
                else:
                    hidden_states, _ = dropout_add_layer_norm_parallel_residual(
                        hidden_states, hidden_states2, residual, self.ln_f.weight, self.ln_f.bias,
                        None, None, self.drop_f.p if self.training else 0.0, self.ln_f.eps,
                        prenorm=False, residual_in_fp32=self.residual_in_fp32
                    )
        return hidden_states


class GPTLMHeadModel(GPTPreTrainedModel, GenerationMixin):

    def __init__(self, config: GPT2Config, process_group=None, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(config)
        self.process_group = process_group
        self.transformer = GPTModel(config, process_group=process_group, **factory_kwargs)
        self.tie_word_embeddings = getattr(config, 'tie_word_embeddings', True)
        lm_head_bias = getattr(config, 'lm_head_bias', False)
        pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
        vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple)
                      * pad_vocab_size_multiple)
        # This option is for OPT-350m
        word_embed_proj_dim = getattr(config, 'word_embed_proj_dim', None)
        embed_dim = config.n_embd if word_embed_proj_dim is None else word_embed_proj_dim
        if word_embed_proj_dim is not None:
            self.project_out = nn.Linear(config.n_embd, embed_dim, bias=False, **factory_kwargs)
        else:
            self.project_out = None
        if process_group is None:
            self.lm_head = nn.Linear(embed_dim, vocab_size, bias=lm_head_bias, **factory_kwargs)
        else:
            if ColumnParallelLinear is None:
                raise ImportError('fused_dense_lib is not installed')
            self.lm_head = ColumnParallelLinear(
                embed_dim, vocab_size, process_group, bias=lm_head_bias,
                sequence_parallel=getattr(config, 'sequence_parallel', True), **factory_kwargs
            )
        # Initialize weights and apply final processing
        self.apply(partial(_init_weights, n_layer=config.num_hidden_layers,
                           initializer_range=config.initializer_range))
        self.tie_weights()

    def tie_weights(self):
        if self.tie_word_embeddings:
            self.lm_head.weight = self.transformer.embeddings.word_embeddings.weight
        if self.process_group is not None:
            sync_shared_params(self, self.process_group)

    def forward(self, input_ids, position_ids=None, inference_params=None):
        """
            inference_params: for generation. Adapted from Megatron-LM (and Apex)
            https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
        """
        hidden_states = self.transformer(input_ids, position_ids=position_ids,
                                         inference_params=inference_params)
        if self.project_out is not None:
            hidden_states = self.project_out(hidden_states)
        lm_logits = self.lm_head(hidden_states)
        # During inference, we want the full logits for sampling
        if (ColumnParallelLinear is not None and isinstance(self.lm_head, ColumnParallelLinear)
                and inference_params is not None):
            lm_logits, _ = all_gather_raw(lm_logits, self.lm_head.process_group)
            lm_logits = rearrange(lm_logits, '(n b) s d -> b s (n d)', b=hidden_states.shape[0])
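        # (all_gather_raw concatenates along dim 0, so the gathered tensor is
        # (world_size * batch, seqlen, vocab // world_size); the rearrange restores
        # (batch, seqlen, vocab).)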
        CausalLMOutput = namedtuple('CausalLMOutput', ['logits'])
        return CausalLMOutput(logits=lm_logits)

    def load_state_dict(self, state_dict, strict=True):
        # Remapping from our checkpoints that used a different ordering of layers in the block
        # Previous: Attn / MLP -> Dropout -> Add -> LN
        # Current: Dropout -> Add -> LN -> Attn / MLP
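        # Concretely: ln_0 -> layers.0.norm1; layers.l.norm1 -> layers.l.norm2;
        # layers.(l-1).norm2 -> layers.l.norm1; and layers.(n-1).norm2 -> ln_f.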
        if 'transformer.ln_0.weight' in state_dict:
            n_layers = len(self.transformer.layers)
            ln_weight = state_dict.pop(f'transformer.layers.{n_layers - 1}.norm2.weight')
            ln_bias = state_dict.pop(f'transformer.layers.{n_layers - 1}.norm2.bias')
            state_dict['transformer.ln_f.weight'] = ln_weight
            state_dict['transformer.ln_f.bias'] = ln_bias
            for l in reversed(range(n_layers)):
                ln_weight = state_dict.pop(f'transformer.layers.{l}.norm1.weight')
                ln_bias = state_dict.pop(f'transformer.layers.{l}.norm1.bias')
                state_dict[f'transformer.layers.{l}.norm2.weight'] = ln_weight
                state_dict[f'transformer.layers.{l}.norm2.bias'] = ln_bias
                if l > 0:
                    ln_weight = state_dict.pop(f'transformer.layers.{l - 1}.norm2.weight')
                    ln_bias = state_dict.pop(f'transformer.layers.{l - 1}.norm2.bias')
                    state_dict[f'transformer.layers.{l}.norm1.weight'] = ln_weight
                    state_dict[f'transformer.layers.{l}.norm1.bias'] = ln_bias
            ln_weight = state_dict.pop('transformer.ln_0.weight')
            ln_bias = state_dict.pop('transformer.ln_0.bias')
            state_dict['transformer.layers.0.norm1.weight'] = ln_weight
            state_dict['transformer.layers.0.norm1.bias'] = ln_bias
        return super().load_state_dict(state_dict, strict=strict)


def shard_state_dict_tp(state_dict, config, world_size, rank):
    """Convert the state_dict of a standard GPT model to the state_dict of a GPT model
    with tensor parallelism.
    """
    pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
    vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
    assert vocab_size % world_size == 0
    assert config.hidden_size % world_size == 0
    inner_dim = config.n_inner if config.n_inner is not None else 4 * config.hidden_size
    assert inner_dim % world_size == 0

    def shard_first_dim(state_dict, key):
        x = state_dict[key]
        dim = x.shape[0] // world_size
        state_dict[key] = x[rank * dim:(rank + 1) * dim]

    def shard_last_dim(state_dict, key):
        x = state_dict[key]
        dim = x.shape[-1] // world_size
        state_dict[key] = x[..., rank * dim:(rank + 1) * dim]

    def shard_qkv_headdim(state_dict, key):
        x = rearrange(state_dict[key], '(three d) ... -> three d ...', three=3)
        dim = x.shape[1] // world_size
        state_dict[key] = rearrange(x[:, rank * dim:(rank + 1) * dim],
                                    'three d ... -> (three d) ...')

    shard_first_dim(state_dict, 'transformer.embeddings.word_embeddings.weight')
    if 'lm_head.weight' in state_dict:
        shard_first_dim(state_dict, 'lm_head.weight')
    if 'transformer.embeddings.position_embeddings.weight' in state_dict:
        shard_last_dim(state_dict, 'transformer.embeddings.position_embeddings.weight')
    for i in range(config.num_hidden_layers):
        shard_qkv_headdim(state_dict, f'transformer.layers.{i}.mixer.Wqkv.weight')
        shard_qkv_headdim(state_dict, f'transformer.layers.{i}.mixer.Wqkv.bias')
        shard_last_dim(state_dict, f'transformer.layers.{i}.mixer.out_proj.weight')
        if rank != 0:
            state_dict.pop(f'transformer.layers.{i}.mixer.out_proj.bias')
        shard_first_dim(state_dict, f'transformer.layers.{i}.mlp.fc1.weight')
        shard_first_dim(state_dict, f'transformer.layers.{i}.mlp.fc1.bias')
        shard_last_dim(state_dict, f'transformer.layers.{i}.mlp.fc2.weight')
        if rank != 0:
            state_dict.pop(f'transformer.layers.{i}.mlp.fc2.bias')
    return state_dict
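
# A minimal usage sketch (hypothetical names; `full_sd` is a complete single-device
# state_dict matching `config`, sharded here for 2-way tensor parallelism):
#     sd_rank0 = shard_state_dict_tp(dict(full_sd), config, world_size=2, rank=0)
#     sd_rank1 = shard_state_dict_tp(dict(full_sd), config, world_size=2, rank=1)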


def combine_state_dicts_tp(state_dicts, config):
    """Combine the sharded state_dicts of a GPT model with tensor parallelism into
    the state_dict of a standard GPT model.
    """
    world_size = len(state_dicts)
    keys = state_dicts[0].keys()
    pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
    vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
    assert vocab_size % world_size == 0
    assert config.hidden_size % world_size == 0
    inner_dim = config.n_inner if config.n_inner is not None else 4 * config.hidden_size
    assert inner_dim % world_size == 0

    # The word embeddings from Megatron are weird, for each shard only the first
    # vocab_size // world_size coordinates are nonzero.
    def combine_word_embeddings(state_dicts, state_dict, key):
        assert all(s[key].shape[0] == vocab_size for s in state_dicts)
        state_dict[key] = torch.cat([s[key][:vocab_size // world_size] for s in state_dicts], dim=0)

    def combine_dim(state_dicts, state_dict, key, dim=-1):
        state_dict[key] = torch.cat([s[key] for s in state_dicts], dim=dim)

    def combine_qkv_headdim(state_dicts, state_dict, key):
        xs = [rearrange(s[key], '(three d) ... -> three d ...', three=3) for s in state_dicts]
        state_dict[key] = rearrange(torch.cat(xs, dim=1), 'three d ... -> (three d) ...')

    state_dict = state_dicts[0].copy()  # don't modify state_dict[0] inplace
    combine_word_embeddings(state_dicts, state_dict, 'transformer.embeddings.word_embeddings.weight')
    if 'lm_head.weight' in state_dict:
        combine_word_embeddings(state_dicts, state_dict, 'lm_head.weight')
    if 'transformer.embeddings.position_embeddings.weight' in state_dict:
        combine_dim(state_dicts, state_dict, 'transformer.embeddings.position_embeddings.weight', -1)
    for i in range(config.num_hidden_layers):
        combine_qkv_headdim(state_dicts, state_dict, f'transformer.layers.{i}.mixer.Wqkv.weight')
        combine_qkv_headdim(state_dicts, state_dict, f'transformer.layers.{i}.mixer.Wqkv.bias')
        combine_dim(state_dicts, state_dict, f'transformer.layers.{i}.mixer.out_proj.weight', -1)
        combine_dim(state_dicts, state_dict, f'transformer.layers.{i}.mlp.fc1.weight', 0)
        combine_dim(state_dicts, state_dict, f'transformer.layers.{i}.mlp.fc1.bias', 0)
        combine_dim(state_dicts, state_dict, f'transformer.layers.{i}.mlp.fc2.weight', -1)
    return state_dict
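
# A minimal sketch (hypothetical `sharded_sds`, one state_dict per tensor-parallel rank,
# e.g. from Megatron-style checkpoints; note the word-embedding layout assumption above):
#     full_sd = combine_state_dicts_tp(sharded_sds, config)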


def remap_state_dict_hf_gpt2(state_dict, config):
    # Word embedding and position embedding
    def key_mapping_pos_emb(key):
        return re.sub(r'^wpe.', 'transformer.embeddings.position_embeddings.', key)
    state_dict = OrderedDict((key_mapping_pos_emb(k), v) for k, v in state_dict.items())
    word_embeddings = state_dict.pop('wte.weight')
    # It's possible that vocab_size is padded to be a multiple of 8, for example.
    pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
    vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
    state_dict['transformer.embeddings.word_embeddings.weight'] = F.pad(
        word_embeddings, (0, 0, 0, vocab_size - word_embeddings.shape[0])
    )
    state_dict['lm_head.weight'] = state_dict['transformer.embeddings.word_embeddings.weight']

    # LayerNorm
    def key_mapping_ln(key):
        key = re.sub(r'^ln_f.(weight|bias)', r'transformer.ln_f.\1', key)
        key = re.sub(r'^h.(\d+).ln_(1|2).(weight|bias)', r'transformer.layers.\1.norm\2.\3', key)
        return key
    state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())

    # MLP
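    # HF GPT-2 stores these projections as Conv1D modules, whose weight layout is the
    # transpose of nn.Linear's; hence the .t() here and in the attention remap below.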
    for d in range(config.num_hidden_layers):
        W1 = state_dict.pop(f'h.{d}.mlp.c_fc.weight')
        state_dict[f'transformer.layers.{d}.mlp.fc1.weight'] = W1.t()
        W2 = state_dict.pop(f'h.{d}.mlp.c_proj.weight')
        state_dict[f'transformer.layers.{d}.mlp.fc2.weight'] = W2.t()
    def key_mapping_mlp(key):
        key = re.sub(r'^h.(\d+).mlp.c_fc.bias', r'transformer.layers.\1.mlp.fc1.bias', key)
        key = re.sub(r'^h.(\d+).mlp.c_proj.bias', r'transformer.layers.\1.mlp.fc2.bias', key)
        return key
    state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())

    # Attention
    for d in range(config.num_hidden_layers):
        state_dict.pop(f'h.{d}.attn.bias')  # We don't store this 'bias' (the causal mask)
        Wqkv = state_dict.pop(f'h.{d}.attn.c_attn.weight')
        state_dict[f'transformer.layers.{d}.mixer.Wqkv.weight'] = Wqkv.t()
        Wout = state_dict.pop(f'h.{d}.attn.c_proj.weight')
        state_dict[f'transformer.layers.{d}.mixer.out_proj.weight'] = Wout.t()
    def key_mapping_attn(key):
        key = re.sub(r'^h.(\d+).attn.c_attn.bias', r'transformer.layers.\1.mixer.Wqkv.bias', key)
        key = re.sub(r'^h.(\d+).attn.c_proj.bias', r'transformer.layers.\1.mixer.out_proj.bias', key)
        return key
    state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())

    return state_dict


def remap_state_dict_megatron(state_dict, config):
    def key_mapping_transformer(key):
        key = re.sub(r'^language_model.encoder.', 'transformer.', key)
        key = re.sub(r'^language_model.', 'transformer.', key)
        return key
    state_dict = OrderedDict((key_mapping_transformer(k), v) for k, v in state_dict.items())
    # Word embedding and position embedding
    def key_mapping_pos_emb(key):
        # After the transformer remap above, Megatron position embeddings live under
        # 'transformer.embedding.position_embeddings.' (note the singular 'embedding').
        return re.sub(r'^transformer.embedding.position_embeddings.',
                      'transformer.embeddings.position_embeddings.', key)
    state_dict = OrderedDict((key_mapping_pos_emb(k), v) for k, v in state_dict.items())
    word_embeddings = state_dict.pop('transformer.embedding.word_embeddings.weight')
    # It's possible that vocab_size is padded to be a multiple of 8, for example.
    pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1)
    vocab_size = (math.ceil(config.vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
    state_dict['transformer.embeddings.word_embeddings.weight'] = F.pad(
        word_embeddings, (0, 0, 0, vocab_size - word_embeddings.shape[0])
    )
    state_dict['lm_head.weight'] = state_dict['transformer.embeddings.word_embeddings.weight']

    # LayerNorm
    def key_mapping_ln(key):
        key = re.sub(r'^transformer.final_layernorm.(weight|bias)', r'transformer.ln_f.\1', key)
        key = re.sub(r'^transformer.layers.(\d+).input_layernorm.(weight|bias)',
                     r'transformer.layers.\1.norm1.\2', key)
        key = re.sub(r'^transformer.layers.(\d+).post_attention_layernorm.(weight|bias)',
                     r'transformer.layers.\1.norm2.\2', key)
        return key
    state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())

    # MLP
    def key_mapping_mlp(key):
        key = re.sub(r'^transformer.layers.(\d+).mlp.dense_h_to_4h.(weight|bias)',
                     r'transformer.layers.\1.mlp.fc1.\2', key)
        key = re.sub(r'^transformer.layers.(\d+).mlp.dense_4h_to_h.(weight|bias)',
                     r'transformer.layers.\1.mlp.fc2.\2', key)
        return key
    state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())

    # Attention
    def key_mapping_attn(key):
        key = re.sub(r'^transformer.layers.(\d+).self_attention.rotary_emb.inv_freq',
                     r'transformer.layers.\1.mixer.rotary_emb.inv_freq', key)
        key = re.sub(r'^transformer.layers.(\d+).self_attention.query_key_value.(weight|bias)',
                     r'transformer.layers.\1.mixer.Wqkv.\2', key)
        key = re.sub(r'^transformer.layers.(\d+).self_attention.dense.(weight|bias)',
                     r'transformer.layers.\1.mixer.out_proj.\2', key)
        return key
    state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())
    # Megatron stores Wqkv as ((nheads 3 headdim), hidden_dim)
    # while we store Wqkv as ((3 nheads headdim), hidden_dim)
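    # E.g. with nheads=2 and headdim=1, Megatron's row order [q0, k0, v0, q1, k1, v1]
    # becomes [q0, q1, k0, k1, v0, v1] here.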
    headdim = config.hidden_size // config.num_attention_heads
    for d in range(config.num_hidden_layers):
        Wqkv = state_dict.pop(f'transformer.layers.{d}.mixer.Wqkv.weight')
        state_dict[f'transformer.layers.{d}.mixer.Wqkv.weight'] = rearrange(
            Wqkv, '(nheads three headdim) ... -> (three nheads headdim) ...',
            three=3, headdim=headdim
        )
        bqkv = state_dict.pop(f'transformer.layers.{d}.mixer.Wqkv.bias')
        state_dict[f'transformer.layers.{d}.mixer.Wqkv.bias'] = rearrange(
            bqkv, '(nheads three headdim) -> (three nheads headdim)',
            three=3, headdim=headdim
        )

    return state_dict