# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only LLaMA model compatible with HuggingFace weights.

The input of the model is flattened to a 1D tensor of tokens. The model uses
InputMetadata to extract the original 2D shape of the input.
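
For example, two sequences with prompt lengths 3 and 2 are packed as

    input_ids = [t0, t1, t2, u0, u1]   # shape (5,)
    positions = [0,  1,  2,  0,  1]    # shape (5,)

where t* and u* are placeholder token ids, and InputMetadata records the
per-sequence information (e.g., sequence lengths and cache slot mappings)
needed to attend within each sequence separately.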
"""
from typing import Dict, List, Optional, Tuple

import torch
from torch import nn
from transformers import LlamaConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.weight_utils import (hf_model_weights_iterator,
                                              load_tensor_parallel_weights)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
from vllm.model_executor.parallel_utils.tensor_parallel import (
    VocabParallelEmbedding, ColumnParallelLinear, RowParallelLinear)
from vllm.sequence import SequenceOutputs

# Each KV cache is a (key_cache, value_cache) pair for a single decoder layer.
KVCache = Tuple[torch.Tensor, torch.Tensor]


class LlamaMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        self.gate_up_proj = ColumnParallelLinear(hidden_size,
                                                 2 * intermediate_size,
                                                 bias=False,
                                                 gather_output=False,
                                                 perform_initialization=False)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           input_is_parallel=True,
                                           perform_initialization=False)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
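        # gate_up holds the fused [gate_proj, up_proj] outputs; SiluAndMul
        # splits them and computes silu(gate) * up.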
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class LlamaAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        self.total_num_heads = num_heads
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.scaling = self.head_dim**-0.5

        self.qkv_proj = ColumnParallelLinear(
            hidden_size,
            3 * self.total_num_heads * self.head_dim,
            bias=False,
            gather_output=False,
            perform_initialization=False,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            input_is_parallel=True,
            perform_initialization=False,
        )
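        # PagedAttentionWithRoPE applies rotary position embeddings to q and k
        # and attends over the paged KV cache.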
        self.attn = PagedAttentionWithRoPE(self.num_heads,
                                           self.head_dim,
                                           self.scaling,
                                           rotary_dim=self.head_dim)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
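        # Split the fused QKV projection into equal q, k, v chunks, each of
        # width num_heads * head_dim on this tensor-parallel rank.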
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(positions, q, k, v, k_cache, v_cache,
                                input_metadata, cache_event)
        output, _ = self.o_proj(attn_output)
        return output


class LlamaDecoderLayer(nn.Module):

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LlamaAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
        )
        self.mlp = LlamaMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


class LlamaModel(nn.Module):

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        # Round the vocab size up to a multiple of 64 (e.g., 32001 -> 32064) so
        # the embedding shards evenly across tensor-parallel ranks.
        vocab_size = ((config.vocab_size + 63) // 64) * 64
        self.embed_tokens = VocabParallelEmbedding(
            vocab_size, config.hidden_size, perform_initialization=False)
        self.layers = nn.ModuleList([
            LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
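        # When cache_events is given, it holds one CUDA event per layer that
        # signals completion of pending cache operations for that layer.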
        hidden_states = self.embed_tokens(input_ids)
        for i in range(len(self.layers)):
            if cache_events is None:
                cache_event = None
            else:
                cache_event = cache_events[i]
            layer = self.layers[i]
            hidden_states = layer(
                positions,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
            )
        hidden_states = self.norm(hidden_states)
        return hidden_states


class LlamaForCausalLM(nn.Module):

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.config = config
        self.model = LlamaModel(config)
        # Use the same padded vocab size as the embedding layer in LlamaModel.
        vocab_size = ((config.vocab_size + 63) // 64) * 64
        self.lm_head = ColumnParallelLinear(config.hidden_size,
                                            vocab_size,
                                            bias=False,
                                            gather_output=False,
                                            perform_initialization=False)
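        # The sampler projects hidden states through lm_head.weight and samples
        # next tokens; it is constructed with the unpadded vocab size so the
        # padded logit rows are never sampled.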
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> Dict[int, SequenceOutputs]:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                                   input_metadata)
        return next_tokens

    _column_parallel_weights = [
        "embed_tokens.weight", "lm_head.weight", "qkv_proj.weight",
        "gate_proj.weight", "up_proj.weight"
    ]
    _row_parallel_weights = ["o_proj.weight", "down_proj.weight"]

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     use_np_cache: bool = False):
        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        tensor_model_parallel_rank = get_tensor_model_parallel_rank()
        state_dict = self.state_dict()

        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, use_np_cache):
            if "rotary_emb.inv_freq" in name:
                continue

            if "embed_tokens" in name or "lm_head" in name:
                param = state_dict[name]
                # Consider padding in the vocab size.
                padded_vocab_size = (param.shape[0] *
                                     tensor_model_parallel_world_size)
                num_extra_rows = padded_vocab_size - self.config.vocab_size
                extra_rows = torch.empty(num_extra_rows,
                                         loaded_weight.shape[1])
                extra_rows = extra_rows.to(loaded_weight)
                loaded_weight = torch.cat([loaded_weight, extra_rows], dim=0)

            is_attention_weight = False
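            # Illustrative shapes, assuming hidden_size = 4096 and 2-way tensor
            # parallelism: this rank's qkv_proj weight has 3 * 4096 / 2 = 6144
            # rows, so shard_size = 2048; the matching 2048-row slice of q_proj
            # is copied into rows [0:2048], k_proj into [2048:4096], and v_proj
            # into [4096:6144].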
            for stride_id, att_weight_name in enumerate(
                ["q_proj", "k_proj", "v_proj"]):
                if att_weight_name not in name:
                    continue
                param = state_dict[name.replace(att_weight_name, "qkv_proj")]
                shard_size = param.shape[0] // 3
                loaded_weight = loaded_weight[
                    shard_size * tensor_model_parallel_rank:shard_size *
                    (tensor_model_parallel_rank + 1)]
                param_slice = param.data[shard_size * stride_id:shard_size *
                                         (stride_id + 1)]
                assert param_slice.shape == loaded_weight.shape
                param_slice.copy_(loaded_weight)
                is_attention_weight = True
                break
            if is_attention_weight:
                continue

            is_gate_up_weight = False
            for stride_id, weight_name in enumerate(["gate_proj", "up_proj"]):
                if weight_name not in name:
                    continue
                param = state_dict[name.replace(weight_name, "gate_up_proj")]
                shard_size = param.shape[0] // 2
                loaded_weight = loaded_weight[
                    shard_size * tensor_model_parallel_rank:shard_size *
                    (tensor_model_parallel_rank + 1)]
                param_slice = param.data[shard_size * stride_id:shard_size *
                                         (stride_id + 1)]
                assert param_slice.shape == loaded_weight.shape
                param_slice.copy_(loaded_weight)
                is_gate_up_weight = True
                break
            if is_gate_up_weight:
                continue

            param = state_dict[name]
            load_tensor_parallel_weights(param, loaded_weight, name,
                                         self._column_parallel_weights,
                                         self._row_parallel_weights,
                                         tensor_model_parallel_rank)
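
# A minimal construction sketch (hypothetical model path; in practice the vLLM
# engine initializes tensor-parallel state and drives this module):
#   config = LlamaConfig.from_pretrained("path/to/llama")
#   model = LlamaForCausalLM(config)
#   model.load_weights("path/to/llama")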