# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gptj/modeling_gptj.py
# Copyright 2023 The vLLM team.
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-J model compatible with HuggingFace weights."""
from typing import List, Optional, Tuple

import torch
from torch import nn
from transformers import GPTJConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                               LinearMethodBase,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput

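# Each layer's KV cache is a (key_cache, value_cache) tensor pair.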
KVCache = Tuple[torch.Tensor, torch.Tensor]


class GPTJAttention(nn.Module):

    def __init__(
        self,
        config: GPTJConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.total_num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.total_num_heads

        self.qkv_proj = QKVParallelLinear(
            config.hidden_size,
            self.head_size,
            self.total_num_heads,
            bias=False,
            linear_method=linear_method,
        )
        self.out_proj = RowParallelLinear(
            config.hidden_size,
            config.hidden_size,
            bias=False,
            linear_method=linear_method,
        )

        tp_world_size = get_tensor_model_parallel_world_size()
        assert self.total_num_heads % tp_world_size == 0
        self.num_heads = self.total_num_heads // tp_world_size

        scaling = self.head_size**-0.5
        assert getattr(config, "rotary", True)
        assert config.rotary_dim % 2 == 0
        rope_theta = getattr(config, "rope_theta", 10000)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
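        # GPT-J uses the interleaved (non-NeoX) rotary embedding layout,
        # hence is_neox_style=False.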
        self.attn = PagedAttentionWithRoPE(
            self.num_heads,
            self.head_size,
            scaling,
            config.rotary_dim,
            base=rope_theta,
            max_position=max_position_embeddings,
            is_neox_style=False)
        self.warmup = False

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
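        # Split the fused QKV projection into query, key, and value.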
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(position_ids, q, k, v, k_cache, v_cache,
                                input_metadata, cache_event)
        attn_output, _ = self.out_proj(attn_output)
        return attn_output


class GPTJMLP(nn.Module):

    def __init__(
        self,
        intermediate_size: int,
        config: GPTJConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        hidden_size = config.n_embd
        self.fc_in = ColumnParallelLinear(
            hidden_size,
            intermediate_size,
            linear_method=linear_method,
        )
        self.fc_out = RowParallelLinear(
            intermediate_size,
            hidden_size,
            linear_method=linear_method,
        )
        quant_config = getattr(linear_method, "quant_config", None)
        self.act = get_act_fn(config.activation_function, quant_config,
                              intermediate_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.fc_out(hidden_states)
        return hidden_states


class GPTJBlock(nn.Module):

    def __init__(
        self,
        config: GPTJConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        inner_dim = (4 * config.n_embd
                     if config.n_inner is None else config.n_inner)
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJAttention(config, linear_method)
        self.mlp = GPTJMLP(inner_dim, config, linear_method)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_output = self.attn(
            position_ids=position_ids,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )
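        # GPT-J uses a parallel residual: the MLP takes the same ln_1 output
        # as attention, and both branch outputs are added to the residual.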
        mlp_output = self.mlp(hidden_states)
        hidden_states = attn_output + mlp_output + residual
        return hidden_states


class GPTJModel(nn.Module):

    def __init__(
        self,
        config: GPTJConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.embed_dim = config.n_embd
        self.wte = VocabParallelEmbedding(
            config.vocab_size,
            self.embed_dim,
        )
        self.h = nn.ModuleList(
            [GPTJBlock(config, linear_method) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim,
                                 eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.wte(input_ids)
        for i in range(len(self.h)):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.h[i]
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
            )
        hidden_states = self.ln_f(hidden_states)
        return hidden_states


class GPTJForCausalLM(nn.Module):

    def __init__(
        self,
        config: GPTJConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.linear_method = linear_method
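        # GPT-J does not tie the LM head to the input embeddings; a separate
        # lm_head (with bias) is loaded instead.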
        assert not config.tie_word_embeddings
        self.transformer = GPTJModel(config, linear_method)
        self.lm_head = ParallelLMHead(
            config.vocab_size,
            config.n_embd,
            bias=True,
        )
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> SamplerOutput:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
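        # The lm_head bias is passed to the sampler along with the weight
        # when computing the logits.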
        next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                                   input_metadata, self.lm_head.bias)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
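        # Map the per-projection HF weights (q_proj/k_proj/v_proj) onto the
        # fused qkv_proj parameter; the gate_up_proj entries are not used by
        # GPT-J, whose MLP has no gate/up projections.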
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
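            # Skip the causal-mask buffers stored in HF GPT-J checkpoints.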
            if "attn.bias" in name or "attn.masked_bias" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                param = params_dict[name.replace(weight_name, param_name)]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)