# -*- coding: utf-8 -*-
from typing import List, Optional, Tuple

import torch
from torch import nn
from transformers import LlamaConfig

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]


class InternLMMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            linear_method=linear_method)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           linear_method=linear_method)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
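        # The fused gate/up projection is split inside SiluAndMul, which
        # applies SiLU to the gate half and multiplies it with the up half.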
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class InternLMAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        bias: bool,
        rope_theta: float = 10000,
        max_position_embeddings: int = 8192,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
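        # Attention heads are partitioned evenly across tensor-parallel ranks.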
        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        self.total_num_heads = num_heads
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            bias=bias,
            linear_method=linear_method,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=bias,
            linear_method=linear_method,
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=self.max_position_embeddings,
            base=self.rope_theta,
        )
        self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
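        # InternLM uses the same number of query and key/value heads, so the
        # fused QKV output splits into three equal chunks.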
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        k_cache, v_cache = kv_cache
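        # PagedAttention writes the new keys/values into the paged KV cache
        # and computes attention over it.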
        attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata,
                                cache_event)
        output, _ = self.o_proj(attn_output)
        return output


class InternLMDecoderLayer(nn.Module):

    def __init__(
        self,
        config: LlamaConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.self_attn = InternLMAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            bias=config.bias,
            rope_theta=rope_theta,
            max_position_embeddings=max_position_embeddings,
            linear_method=linear_method,
        )
        self.mlp = InternLMMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            linear_method=linear_method,
        )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
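        # RMSNorm here fuses the residual addition; the first layer has no
        # incoming residual yet.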
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class InternLMModel(nn.Module):

    def __init__(
        self,
        config: LlamaConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        # Pad the vocabulary to a multiple of 64 so the embedding partitions
        # evenly across tensor-parallel ranks.
        vocab_size = ((config.vocab_size + 63) // 64) * 64
        self.embed_tokens = VocabParallelEmbedding(
            vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            InternLMDecoderLayer(config, linear_method)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
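        # Each decoder layer consumes its own KV cache and optional cache event.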
        for i in range(len(self.layers)):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.layers[i]
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[i],
                input_metadata,
                cache_event,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class InternLMForCausalLM(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.model = InternLMModel(config, linear_method)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   input_metadata, cache_events)
        return hidden_states

    def sample(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> SamplerOutput:
        next_tokens = self.sampler(self.lm_head.weight, hidden_states,
                                   sampling_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
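        # Checkpoints store separate q/k/v and gate/up weights; they are loaded
        # into the fused qkv_proj and gate_up_proj parameters via their
        # shard-aware weight loaders.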
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if "rotary_emb.inv_freq" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                param = params_dict[name.replace(weight_name, param_name)]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)