# coding=utf-8
# Adapted from
# https://github.com/THUDM/ChatGLM2-6B
"""Inference-only ChatGLM model compatible with THUDM weights."""
from typing import List, Optional, Tuple

import torch
from torch import nn
from torch.nn import LayerNorm

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput
from vllm.transformers_utils.configs import ChatGLMConfig

KVCache = Tuple[torch.Tensor, torch.Tensor]
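# KVCache: one layer's (key_cache, value_cache) tensor pair, allocated and
# reused by vLLM's paged attention.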


class GLMAttention(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.multi_query_attention = config.multi_query_attention
        self.total_num_kv_heads = (config.multi_query_group_num
                                   if config.multi_query_attention else
                                   config.num_attention_heads)
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is at least the TP size, so we partition
            # the KV heads across the tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
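        # Illustrative numbers (not from this file): ChatGLM2/3-style configs
        # typically set multi_query_group_num=2, so with tp_size=2 each rank
        # keeps one KV head, while with tp_size=8 the two heads are replicated
        # and num_kv_heads stays at 1 per rank.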
        self.head_dim = config.hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.query_key_value = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.add_bias_linear or config.add_qkv_bias,
            linear_method=linear_method,
        )
        self.dense = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

        # https://huggingface.co/THUDM/chatglm3-6b-32k/blob/e210410255278dd9d74463cf396ba559c0ef801c/modeling_chatglm.py#L141
        rope_ratio = getattr(config, "rope_ratio", 1.0)
        max_positions = getattr(config, "seq_length", 8192)
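        # ChatGLM rotates only half of each head's channels
        # (rotary_dim = head_dim // 2) in the interleaved, non-NeoX layout;
        # long-context checkpoints (e.g. chatglm3-6b-32k) stretch the RoPE
        # base via rope_ratio.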
        self.attn = PagedAttentionWithRoPE(
            self.num_heads,
            self.head_dim,
            self.scaling,
            rotary_dim=self.head_dim // 2,
            num_kv_heads=self.num_kv_heads,
            max_position=max_positions,
            base=10000 * rope_ratio,
            is_neox_style=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.query_key_value(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        key_cache, value_cache = kv_cache

        context_layer = self.attn(
            position_ids,
            q,
            k,
            v,
            key_cache,
            value_cache,
            input_metadata,
            cache_event,
        )

        attn_output, _ = self.dense(context_layer)

        return attn_output


class GLMMLP(nn.Module):
    """MLP.

    MLP will take the input with h hidden state, project it to the
    intermediate ffn_hidden_size (as a fused gate/up pair), apply SiLU
    gating, and project the state back into h hidden dimension.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.add_bias = config.add_bias_linear

        # Project to 4h.
        self.dense_h_to_4h = MergedColumnParallelLinear(
            config.hidden_size,
            [config.ffn_hidden_size] * 2,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )
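        # dense_h_to_4h emits the gate and up projections fused along the last
        # dim (2 * ffn_hidden_size per token); SiluAndMul below splits them
        # and computes silu(gate) * up, halving the width back to
        # ffn_hidden_size.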

        self.activation_func = SiluAndMul()

        # Project back to h.
        self.dense_4h_to_h = RowParallelLinear(
            config.ffn_hidden_size,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

    def forward(self, hidden_states):
        # [num_tokens, 2 * ffn_hidden_size / tp] (fused gate and up)
        intermediate_parallel, _ = self.dense_h_to_4h(hidden_states)
        intermediate_parallel = self.activation_func(intermediate_parallel)
        # [num_tokens, h]
        output, _ = self.dense_4h_to_h(intermediate_parallel)
        return output


class GLMBlock(nn.Module):
    """A single transformer layer.

    Transformer layer takes input with size [num_tokens, h] and returns an
    output of the same size.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.apply_residual_connection_post_layernorm = (
            config.apply_residual_connection_post_layernorm)

        self.fp32_residual_connection = config.fp32_residual_connection

        layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
        # Layernorm on the input data.
        self.input_layernorm = layer_norm_func(config.hidden_size,
                                               eps=config.layernorm_epsilon)

        # Self attention.
        self.self_attention = GLMAttention(config, linear_method)
        self.hidden_dropout = config.hidden_dropout

        # Layernorm on the attention output
        self.post_attention_layernorm = layer_norm_func(
            config.hidden_size, eps=config.layernorm_epsilon)

        # MLP
        self.mlp = GLMMLP(config, linear_method)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        # hidden_states: [num_tokens, h]
        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output = self.self_attention(
            hidden_states=layernorm_output,
            position_ids=position_ids,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        layernorm_input = residual + attention_output

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        output = self.mlp(layernorm_output) + residual

        return output


class GLMTransformer(nn.Module):
    """Transformer class."""

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.post_layer_norm = config.post_layer_norm

        # Number of layers.
        self.num_layers = config.num_layers

        # Transformer layers.
        self.layers = nn.ModuleList(
            [GLMBlock(config, linear_method) for i in range(self.num_layers)])

        if self.post_layer_norm:
            layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
            # Final layer norm before output.
            self.final_layernorm = layer_norm_func(
                config.hidden_size, eps=config.layernorm_epsilon)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        for i in range(self.num_layers):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.layers[i]
            hidden_states = layer(
                hidden_states=hidden_states,
                position_ids=position_ids,
                kv_cache=kv_caches[i],
                input_metadata=input_metadata,
                cache_event=cache_event,
            )
        # Final layer norm.
        if self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)

        return hidden_states


class ChatGLMModel(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.embedding = VocabParallelEmbedding(config.padded_vocab_size,
                                                config.hidden_size)

        self.num_layers = config.num_layers
        self.multi_query_group_num = config.multi_query_group_num
        self.kv_channels = config.kv_channels
        self.encoder = GLMTransformer(config, linear_method)

        self.output_layer = ParallelLMHead(config.padded_vocab_size,
                                           config.hidden_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ):
        inputs_embeds = self.embedding(input_ids)

        # Run encoder.
        hidden_states = self.encoder(
            hidden_states=inputs_embeds,
            position_ids=position_ids,
            kv_caches=kv_caches,
            input_metadata=input_metadata,
            cache_events=cache_events,
        )

        return hidden_states


class ChatGLMForCausalLM(nn.Module):

    def __init__(
        self,
        config: ChatGLMConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config: ChatGLMConfig = config
        self.linear_method = linear_method
        self.transformer = ChatGLMModel(config, linear_method)
        self.lm_head_weight = self.transformer.output_layer.weight
        self.sampler = Sampler(config.padded_vocab_size)
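        # The sampler projects hidden states onto the vocabulary using
        # lm_head_weight (shared with output_layer) and samples the next token
        # for each sequence.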

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> SamplerOutput:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head_weight, hidden_states,
                                   input_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if "rotary_pos_emb.inv_freq" in name:
                continue
            if "word_embeddings" in name:
                name = name.replace(".word_embeddings", "")
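                # e.g. "transformer.embedding.word_embeddings.weight"
                #      -> "transformer.embedding.weight"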
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)