# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt_neox/modeling_gpt_neox.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-NeoX model compatible with HuggingFace weights.

The input of the model is flattened to a 1D tensor of tokens. The model uses
InputMetadata to extract the original 2D shape of the input.
"""
from typing import Dict, List, Optional, Tuple

import torch
from torch import nn
from transformers import GPTNeoXConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.weight_utils import (hf_model_weights_iterator,
                                              load_tensor_parallel_weights)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
from vllm.model_executor.parallel_utils.tensor_parallel import (
    VocabParallelEmbedding, ColumnParallelLinear, RowParallelLinear)
from vllm.sequence import SequenceOutputs

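# A layer's KV cache is a (key_cache, value_cache) tensor pair managed by
# vLLM's paged cache engine.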
KVCache = Tuple[torch.Tensor, torch.Tensor]


class GPTNeoXAttention(nn.Module):

    def __init__(self, config: GPTNeoXConfig):
        super().__init__()
        self.total_num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.total_num_heads

        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
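        # Attention heads are partitioned evenly across tensor-parallel ranks.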
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)

        self.query_key_value = ColumnParallelLinear(
            config.hidden_size,
            3 * config.hidden_size,
            gather_output=False,
            perform_initialization=False)
        self.dense = RowParallelLinear(config.hidden_size,
                                       config.hidden_size,
                                       input_is_parallel=True,
                                       perform_initialization=False)

        scaling = self.head_size**-0.5
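        # GPT-NeoX uses partial rotary embeddings: only the first
        # `rotary_pct` fraction of each head's dimensions is rotated.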
        rotary_dim = int(self.head_size * config.rotary_pct)
        assert rotary_dim % 2 == 0
        self.attn = PagedAttentionWithRoPE(self.num_heads, self.head_size,
                                           scaling, rotary_dim)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.query_key_value(hidden_states)
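        # An even three-way split is valid because load_weights() permutes the
        # HF fused QKV into [3 * num_heads * head_size, hidden_size] order.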
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(position_ids, q, k, v, k_cache, v_cache,
                                input_metadata, cache_event)
        output, _ = self.dense(attn_output)
        return output


class GPTNeoXMLP(nn.Module):

    def __init__(self, config: GPTNeoXConfig):
        super().__init__()
        self.dense_h_to_4h = ColumnParallelLinear(config.hidden_size,
                                                  config.intermediate_size,
                                                  gather_output=False,
                                                  perform_initialization=False)
        self.dense_4h_to_h = RowParallelLinear(config.intermediate_size,
                                               config.hidden_size,
                                               input_is_parallel=True,
                                               perform_initialization=False)
        self.act = get_act_fn(config.hidden_act)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.dense_4h_to_h(hidden_states)
        return hidden_states


class GPTNeoXLayer(nn.Module):

    def __init__(self, config: GPTNeoXConfig):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.layer_norm_eps)
        self.attention = GPTNeoXAttention(config)
        self.mlp = GPTNeoXMLP(config)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        attn_input = self.input_layernorm(hidden_states)
        attn_output = self.attention(
            position_ids=position_ids,
            hidden_states=attn_input,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

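        # use_parallel_residual is True for GPT-NeoX-20B and the Pythia
        # checkpoints; both branches then read the same residual input.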
        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            mlp_input = self.post_attention_layernorm(hidden_states)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output + hidden_states
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            attn_output = attn_output + hidden_states
            mlp_input = self.post_attention_layernorm(attn_output)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output
        return hidden_states


class GPTNeoXModel(nn.Module):

    def __init__(self, config: GPTNeoXConfig):
        super().__init__()
        self.config = config

        self.embed_in = VocabParallelEmbedding(config.vocab_size,
                                               config.hidden_size,
                                               perform_initialization=False)
        self.layers = nn.ModuleList(
            [GPTNeoXLayer(config) for _ in range(config.num_hidden_layers)])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size,
                                             eps=config.layer_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.embed_in(input_ids)
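        # If provided, cache_events holds one CUDA event per layer; each layer
        # waits on its event so KV-cache copy/swap ops finish before attention.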
        for i, layer in enumerate(self.layers):
            cache_event = None if cache_events is None else cache_events[i]
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
            )
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states


class GPTNeoXForCausalLM(nn.Module):

    def __init__(self, config: GPTNeoXConfig):
        super().__init__()
        self.config = config
        self.gpt_neox = GPTNeoXModel(config)
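        # GPT-NeoX does not tie input and output embeddings; embed_out is a
        # separate LM head rather than a reuse of embed_in.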
        self.embed_out = ColumnParallelLinear(config.hidden_size,
                                              config.vocab_size,
                                              bias=False,
                                              gather_output=False,
                                              perform_initialization=False)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> Dict[int, SequenceOutputs]:
        hidden_states = self.gpt_neox(input_ids, positions, kv_caches,
                                      input_metadata, cache_events)
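        # Project hidden states onto the vocabulary with the embed_out weight
        # and sample the next token for each sequence.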
        next_tokens = self.sampler(self.embed_out.weight, hidden_states,
                                   input_metadata)
        return next_tokens

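    # Parameter names that are sharded column-wise / row-wise when loading a
    # HuggingFace checkpoint into tensor-parallel shards.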
    _column_parallel_weights = [
        "embed_in.weight", "embed_out.weight", "dense_h_to_4h.weight",
        "dense_h_to_4h.bias"
    ]
    _row_parallel_weights = ["dense.weight", "dense_4h_to_h.weight"]

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     use_np_cache: bool = False):
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        state_dict = self.state_dict()
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, use_np_cache):
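            # Skip buffers that are rebuilt at runtime (the causal-mask bias
            # and rotary inv_freq); they are not parameters of this model.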
            if ("attention.bias" in name or "attention.masked_bias" in name
                    or "rotary_emb.inv_freq" in name):
                continue
            param = state_dict[name]
            if "query_key_value" in name:
                # NOTE(woosuk): GPT-NeoX's fused QKV has the shape of
                # [num_heads * 3 * head_size, hidden_size], while the
                # required shape is [3 * num_heads * head_size, hidden_size].
                # Thus, we need weight conversion.
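                # Illustrative example: with two heads, row blocks ordered
                # [q0, k0, v0, q1, k1, v1] in the HF checkpoint become
                # [q0, q1, k0, k1, v0, v1] after the view/transpose/reshape.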
                shard_size = param.shape[0]
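                # Each tensor-parallel rank loads only its contiguous row
                # shard of the fused QKV parameter.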
                loaded_weight = loaded_weight[
                    shard_size * tensor_model_parallel_rank:shard_size *
                    (tensor_model_parallel_rank + 1)]

                num_heads = self.config.num_attention_heads
                hidden_size = self.config.hidden_size
                head_size = hidden_size // num_heads
                if "query_key_value.weight" in name:
                    loaded_weight = loaded_weight.view(-1, 3, head_size,
                                                       hidden_size)
                    loaded_weight = loaded_weight.transpose(0, 1)
                    loaded_weight = loaded_weight.reshape(-1, hidden_size)
                elif "query_key_value.bias" in name:
                    loaded_weight = loaded_weight.view(-1, 3, head_size)
                    loaded_weight = loaded_weight.transpose(0, 1)
                    loaded_weight = loaded_weight.reshape(-1)
                else:
                    raise ValueError(f"Unexpected weight name: {name}")
            load_tensor_parallel_weights(param, loaded_weight, name,
                                         self._column_parallel_weights,
                                         self._row_parallel_weights,
                                         tensor_model_parallel_rank)