# coding=utf-8
# Adapted from
# https://github.com/THUDM/ChatGLM2-6B
"""Inference-only ChatGLM model compatible with THUDM weights.

The input of the model is flattened to a 1D tensor of tokens. The model uses
InputMetadata to extract the original 2D shape of the input.
"""
from typing import List, Optional, Tuple

import torch
from torch import nn
from torch.nn import LayerNorm

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput
from vllm.transformers_utils.configs import ChatGLMConfig

KVCache = Tuple[torch.Tensor, torch.Tensor]


class GLMAttention(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.multi_query_attention = config.multi_query_attention
        self.total_num_kv_heads = (config.multi_query_group_num
                                   if config.multi_query_attention else
                                   config.num_attention_heads)
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than or equal to TP size, so we
            # partition the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
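        # Worked example with illustrative values (not read from any real
        # config): multi_query_group_num = 2 and tp_size = 4 replicates each
        # KV head on two GPUs, giving num_kv_heads = max(1, 2 // 4) = 1.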
        self.head_dim = config.hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.query_key_value = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.add_bias_linear or config.add_qkv_bias,
            linear_method=linear_method,
        )
        self.dense = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

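        # ChatGLM rotates only half of each head's dimensions and uses the
        # interleaved (GPT-J style) pairing rather than the NeoX half-split,
        # hence rotary_dim = head_dim // 2 and is_neox_style=False.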
        self.attn = PagedAttentionWithRoPE(
            self.num_heads,
            self.head_dim,
            self.scaling,
            rotary_dim=self.head_dim // 2,
            num_kv_heads=self.num_kv_heads,
            is_neox_style=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
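        # Fused QKV projection, then a per-rank split into query/key/value.
        # Illustrative sizes for chatglm2-6b on one GPU (hidden 4096,
        # 32 query heads, 2 KV heads, head_dim 128): q_size = 4096,
        # kv_size = 256.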
        qkv, _ = self.query_key_value(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        key_cache, value_cache = kv_cache

        context_layer = self.attn(
            position_ids,
            q,
            k,
            v,
            key_cache,
            value_cache,
            input_metadata,
            cache_event,
        )

        attn_output, _ = self.dense(context_layer)

        return attn_output


class GLMMLP(nn.Module):
    """MLP.

    MLP will take the input with h hidden state, project it to the gated
    intermediate dimension (two ffn_hidden_size projections), perform the
    SiLU-gated nonlinear transformation, and project the state back into
    h hidden dimension.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.add_bias = config.add_bias_linear

        # Project to the gated intermediate size (2 * ffn_hidden_size).
        self.dense_h_to_4h = MergedColumnParallelLinear(
            config.hidden_size,
            [config.ffn_hidden_size] * 2,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

        self.activation_func = SiluAndMul()
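        # SiluAndMul splits its input in half along the last dimension and
        # computes silu(gate) * up, reducing 2 * ffn_hidden_size columns
        # back to ffn_hidden_size.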

        # Project back to h.
        self.dense_4h_to_h = RowParallelLinear(
            config.ffn_hidden_size,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

    def forward(self, hidden_states):
        # [num_tokens, 2 * ffn_hidden_size / tp]
        intermediate_parallel, _ = self.dense_h_to_4h(hidden_states)
        intermediate_parallel = self.activation_func(intermediate_parallel)
        # [num_tokens, h]
        output, _ = self.dense_4h_to_h(intermediate_parallel)
        return output


class GLMBlock(nn.Module):
    """A single transformer layer.

    Transformer layer takes input with size [num_tokens, h] and returns an
    output of the same size.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.apply_residual_connection_post_layernorm = (
            config.apply_residual_connection_post_layernorm)

        self.fp32_residual_connection = config.fp32_residual_connection

        layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
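        # config.rmsnorm selects RMSNorm (the ChatGLM2 default) or vanilla
        # LayerNorm for both normalizations in this block.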
        # Layernorm on the input data.
        self.input_layernorm = layer_norm_func(config.hidden_size,
                                               eps=config.layernorm_epsilon)

        # Self attention.
        self.self_attention = GLMAttention(config, linear_method)
        self.hidden_dropout = config.hidden_dropout

        # Layernorm on the attention output
        self.post_attention_layernorm = layer_norm_func(
            config.hidden_size, eps=config.layernorm_epsilon)

        # MLP
        self.mlp = GLMMLP(config, linear_method)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        # hidden_states: [num_tokens, h]
        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output = self.self_attention(
            hidden_states=layernorm_output,
            position_ids=position_ids,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        layernorm_input = residual + attention_output

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        output = self.mlp(layernorm_output) + residual

        return output


class GLMTransformer(nn.Module):
    """Transformer class."""

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.post_layer_norm = config.post_layer_norm

        # Number of layers.
        self.num_layers = config.num_layers

        # Transformer layers.
        self.layers = nn.ModuleList(
            [GLMBlock(config, linear_method) for _ in range(self.num_layers)])

        if self.post_layer_norm:
            layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
            # Final layer norm before output.
            self.final_layernorm = layer_norm_func(
                config.hidden_size, eps=config.layernorm_epsilon)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        for i in range(self.num_layers):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.layers[i]
            hidden_states = layer(
                hidden_states=hidden_states,
                position_ids=position_ids,
                kv_cache=kv_caches[i],
                input_metadata=input_metadata,
                cache_event=cache_event,
            )
        # Final layer norm.
        if self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)

        return hidden_states


class ChatGLMModel(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.embedding = VocabParallelEmbedding(config.padded_vocab_size,
                                                config.hidden_size)

        self.num_layers = config.num_layers
        self.multi_query_group_num = config.multi_query_group_num
        self.kv_channels = config.kv_channels
        self.encoder = GLMTransformer(config, linear_method)

        self.output_layer = ParallelLMHead(config.padded_vocab_size,
                                           config.hidden_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        inputs_embeds = self.embedding(input_ids)

        # Run encoder.
        hidden_states = self.encoder(
            hidden_states=inputs_embeds,
            position_ids=position_ids,
            kv_caches=kv_caches,
            input_metadata=input_metadata,
            cache_events=cache_events,
        )

        return hidden_states


class ChatGLMForCausalLM(nn.Module):

    def __init__(
        self,
        config: ChatGLMConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config: ChatGLMConfig = config
        self.linear_method = linear_method
        self.transformer = ChatGLMModel(config, linear_method)
        self.lm_head_weight = self.transformer.output_layer.weight
        self.sampler = Sampler(config.padded_vocab_size)
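        # The sampler projects the final hidden states onto the vocabulary
        # with lm_head_weight and samples the next tokens.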

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> SamplerOutput:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head_weight, hidden_states,
                                   input_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
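            # rotary_pos_emb.inv_freq is a buffer recomputed at runtime by
            # the attention layer, so the checkpoint copy can be skipped.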
            if "rotary_pos_emb.inv_freq" in name:
                continue
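            # The HF checkpoint names the input embedding
            # "embedding.word_embeddings"; the vLLM module is just
            # "embedding", so drop the suffix.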
            if "word_embeddings" in name:
                name = name.replace(".word_embeddings", "")
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
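

if __name__ == "__main__":
    # Minimal usage sketch, not part of the model definition. vLLM's engine
    # dispatches to ChatGLMForCausalLM through its model registry, so the
    # class is normally driven via the high-level LLM API. The model name is
    # illustrative and assumes the weights can be downloaded.
    from vllm import LLM, SamplingParams

    llm = LLM(model="THUDM/chatglm2-6b", trust_remote_code=True)
    outputs = llm.generate(["Hello"], SamplingParams(max_tokens=32))
    print(outputs[0].outputs[0].text)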