# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt2/modeling_gpt2.py
# Copyright 2023 The vLLM team.
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-2 model compatible with HuggingFace weights.

The input of the model is flattened to a 1D tensor of tokens. The model uses
InputMetadata to extract the original 2D shape of the input.
"""
from typing import List, Optional, Tuple

import torch
from torch import nn
from transformers import GPT2Config

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import PagedAttention
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                               LinearMethodBase,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]


class GPT2Attention(nn.Module):

    def __init__(
        self,
        config: GPT2Config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        total_num_heads = config.num_attention_heads
        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
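        # Partition the attention heads evenly across tensor-parallel ranks.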
        assert total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = total_num_heads // tensor_model_parallel_world_size
        self.head_dim = self.hidden_size // total_num_heads
        self.scale = self.head_dim**-0.5

        self.c_attn = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            total_num_heads,
            bias=True,
            linear_method=linear_method,
        )
        self.c_proj = RowParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=True,
            linear_method=linear_method,
        )
        self.attn = PagedAttention(self.num_heads,
                                   self.head_dim,
                                   scale=self.scale)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.c_attn(hidden_states)
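        # Split the fused QKV projection output into query, key, and value tensors.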
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        key_cache, value_cache = kv_cache
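        # PagedAttention stores the new keys/values in the paged cache and
        # computes attention over the cached context.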
        attn_output = self.attn(q, k, v, key_cache, value_cache,
                                input_metadata, cache_event)
        attn_output, _ = self.c_proj(attn_output)
        return attn_output


class GPT2MLP(nn.Module):

    def __init__(
        self,
        intermediate_size: int,
        config: GPT2Config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        self.c_fc = ColumnParallelLinear(
            hidden_size,
            intermediate_size,
            bias=True,
            linear_method=linear_method,
        )
        self.c_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=True,
            linear_method=linear_method,
        )
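        # Resolve the activation (GPT-2 defaults to "gelu_new"); quantization-aware
        # variants may need the quant_config and intermediate size.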
        quant_config = getattr(linear_method, "quant_config", None)
        self.act = get_act_fn(config.activation_function, quant_config,
                              intermediate_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.c_proj(hidden_states)
        return hidden_states


class GPT2Block(nn.Module):

    def __init__(
        self,
        config: GPT2Config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = (config.n_inner if config.n_inner is not None else 4 *
                     hidden_size)

        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = GPT2Attention(config, linear_method)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = GPT2MLP(inner_dim, config, linear_method)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_output = self.attn(
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )
        # residual connection
        hidden_states = attn_output + residual

        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states
        return hidden_states


class GPT2Model(nn.Module):

    def __init__(
        self,
        config: GPT2Config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
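        # HF GPT-2 options that this inference-only implementation does not support.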
        assert not config.add_cross_attention
        assert not config.scale_attn_by_inverse_layer_idx
        assert not config.reorder_and_upcast_attn
        self.embed_dim = config.hidden_size
        self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        self.h = nn.ModuleList([
            GPT2Block(config, linear_method)
            for _ in range(config.num_hidden_layers)
        ])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
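        # Sum token embeddings and learned absolute position embeddings.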
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        for i in range(len(self.h)):
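            # Run each block with its layer-specific KV cache and optional cache event.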
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.h[i]
            hidden_states = layer(hidden_states, kv_caches[i], input_metadata,
                                  cache_event)

        hidden_states = self.ln_f(hidden_states)
        return hidden_states


class GPT2LMHeadModel(nn.Module):

    def __init__(
        self,
        config: GPT2Config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.transformer = GPT2Model(config, linear_method)
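        # GPT-2 ties input and output embeddings, so reuse wte's weight as the LM head.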
        self.lm_head_weight = self.transformer.wte.weight
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> SamplerOutput:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head_weight, hidden_states,
                                   input_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if "lm_head.weight" in name:
                # GPT-2 ties the weights of the embedding layer and the final
                # linear layer.
                continue
            if ".attn.bias" in name or ".attn.masked_bias" in name:
                # Skip attention mask.
                # NOTE: "c_attn.bias" should not be skipped.
                continue
            if not name.startswith("transformer."):
                name = "transformer." + name
            param = params_dict[name]
            # HF's GPT-2 implementation uses Conv1D instead of Linear.
            # Because of this, we need to transpose the weights.
            # Note(zhuohan): the logic below might break quantized models.
            for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
                if conv1d_weight_name not in name:
                    continue
                if not name.endswith(".weight"):
                    continue
                loaded_weight = loaded_weight.t()

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)