# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt_neox/modeling_gpt_neox.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-NeoX model compatible with HuggingFace weights."""
from typing import List, Optional, Tuple

import torch
from torch import nn
from transformers import GPTNeoXConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import PagedAttention
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                               LinearMethodBase,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]


class GPTNeoXAttention(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.total_num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.total_num_heads

        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
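        # The attention heads are split evenly across tensor-parallel ranks.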
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)

        self.query_key_value = QKVParallelLinear(
            config.hidden_size,
            self.head_size,
            self.total_num_heads,
            linear_method=linear_method,
        )
        self.dense = RowParallelLinear(
            config.hidden_size,
            config.hidden_size,
            linear_method=linear_method,
        )
        scaling = self.head_size**-0.5
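        # GPT-NeoX applies rotary embeddings to only the first rotary_pct
        # fraction of each head's dimensions.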
        rotary_dim = int(self.head_size * config.rotary_pct)
        assert rotary_dim % 2 == 0
        rope_theta = getattr(config, "rope_theta", 10000)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.rotary_emb = get_rope(
            self.head_size,
            rotary_dim=rotary_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
        )
        self.attn = PagedAttention(self.num_heads, self.head_size, scaling)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
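        # Single fused QKV projection; the output splits into equal
        # query/key/value chunks.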
        qkv, _ = self.query_key_value(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        q, k = self.rotary_emb(position_ids, q, k)
        k_cache, v_cache = kv_cache
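        # PagedAttention writes the new keys/values into the paged cache and
        # computes attention over it.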
        attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
        output, _ = self.dense(attn_output)
        return output


class GPTNeoXMLP(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.dense_h_to_4h = ColumnParallelLinear(
            config.hidden_size,
            config.intermediate_size,
            linear_method=linear_method,
        )
        self.dense_4h_to_h = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            linear_method=linear_method,
        )
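        # The quantization config (if any) is forwarded so get_act_fn can
        # special-case the activation for quantized weights.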
        quant_config = getattr(linear_method, "quant_config", None)
        self.act = get_act_fn(config.hidden_act, quant_config,
                              config.intermediate_size)

    def forward(self, hidden_states):
        hidden_states, _ = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.dense_4h_to_h(hidden_states)
        return hidden_states


class GPTNeoXLayer(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.layer_norm_eps)
        self.attention = GPTNeoXAttention(config, linear_method)
        self.mlp = GPTNeoXMLP(config, linear_method)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        attn_input = self.input_layernorm(hidden_states)
        attn_output = self.attention(
            position_ids=position_ids,
            hidden_states=attn_input,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
        )

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            mlp_input = self.post_attention_layernorm(hidden_states)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output + hidden_states
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            attn_output = attn_output + hidden_states
            mlp_input = self.post_attention_layernorm(attn_output)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output
        return hidden_states


class GPTNeoXModel(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config

        self.embed_in = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            GPTNeoXLayer(config, linear_method)
            for _ in range(config.num_hidden_layers)
        ])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size,
                                             eps=config.layer_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_in(input_ids)
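        # Each transformer layer is paired with its own entry in kv_caches.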
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_caches[i],
                input_metadata,
            )
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states


class GPTNeoXForCausalLM(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.gpt_neox = GPTNeoXModel(config, linear_method)
        self.embed_out = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
        )
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
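        # Only hidden states are computed here; token sampling is done
        # separately in sample().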
        hidden_states = self.gpt_neox(input_ids, positions, kv_caches,
                                      input_metadata)
        return hidden_states

    def sample(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> SamplerOutput:
        next_tokens = self.sampler(self.embed_out.weight, hidden_states,
                                   sampling_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
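            # Skip buffers that only the HF implementation uses (the
            # causal-mask bias and rotary inv_freq are not needed here).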
            if ("attention.bias" in name or "attention.masked_bias" in name
                    or "rotary_emb.inv_freq" in name):
                continue
            param = params_dict[name]

            if "query_key_value" in name:
                # NOTE: In GPT-NeoX checkpoints, the fused QKV weight is laid
                # out along output_dim as (num_heads * 3 * head_size), while
                # QKVParallelLinear expects (3 * num_heads * head_size).
                # Thus, we need weight conversion.
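                # For example, with two heads the fused dimension is ordered
                # [q0, k0, v0, q1, k1, v1] in the checkpoint (grouped per
                # head, each block head_size wide), but must become
                # [q0, q1, k0, k1, v0, v1] (grouped per q/k/v); the
                # view/transpose/reshape below performs that permutation.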
                output_dim = getattr(param, "output_dim", None)
                num_heads = self.config.num_attention_heads
                if output_dim is not None:
                    loaded_weight_shape = loaded_weight.shape
                    loaded_weight = loaded_weight.view(
                        loaded_weight_shape[:output_dim] + (num_heads, 3, -1) +
                        loaded_weight_shape[output_dim + 1:])
                    loaded_weight = loaded_weight.transpose(
                        output_dim, output_dim + 1)
                    loaded_weight = loaded_weight.reshape(loaded_weight_shape)

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)