"hipblaslt.config" did not exist on "d51e625fb840176f374409654db6a822f94d36b1"
chatglm.py 12.8 KB
Newer Older
GoHomeToMacDonal's avatar
GoHomeToMacDonal committed
1
2
3
# coding=utf-8
# Adapted from
# https://github.com/THUDM/ChatGLM2-6B
"""Inference-only ChatGLM model compatible with THUDM weights."""
from typing import List, Optional, Tuple

import torch
from torch import nn
from torch.nn import LayerNorm

from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
                                               MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
    get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
                                              hf_model_weights_iterator)
from vllm.sequence import SamplerOutput
from vllm.transformers_utils.configs import ChatGLMConfig

KVCache = Tuple[torch.Tensor, torch.Tensor]


class GLMAttention(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.multi_query_attention = config.multi_query_attention
        self.total_num_kv_heads = (config.multi_query_group_num
                                   if config.multi_query_attention else
                                   config.num_attention_heads)
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
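        # Illustrative example (not from the original source): chatglm2-style
        # configs typically set multi_query_group_num = 2, so with tp_size = 4
        # total_num_kv_heads (2) < tp_size and each rank keeps
        # max(1, 2 // 4) = 1 replicated KV head, while with tp_size = 2 each
        # rank gets exactly 2 // 2 = 1 KV head with no replication.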
        self.head_dim = config.hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.query_key_value = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.add_bias_linear or config.add_qkv_bias,
            linear_method=linear_method,
        )
        self.dense = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

        # https://huggingface.co/THUDM/chatglm3-6b-32k/blob/e210410255278dd9d74463cf396ba559c0ef801c/modeling_chatglm.py#L141
        rope_ratio = getattr(config, "rope_ratio", 1.0)
        max_positions = getattr(config, "seq_length", 8192)
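        # Note (illustrative): rope_ratio only rescales the RoPE base used
        # below; e.g. a hypothetical rope_ratio of 50 would raise the base
        # from 10000 to 500000, stretching the rotary period for
        # long-context checkpoints.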
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim // 2,
            max_position=max_positions,
            base=10000 * rope_ratio,
            is_neox_style=False,
        )
        self.attn = PagedAttention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        qkv, _ = self.query_key_value(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
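        # Shapes after the split: q is [num_tokens, num_heads * head_dim],
        # while k and v are [num_tokens, num_kv_heads * head_dim].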
        q, k = self.rotary_emb(position_ids, q, k)
        key_cache, value_cache = kv_cache
        context_layer = self.attn(
            q,
            k,
            v,
            key_cache,
            value_cache,
            input_metadata,
            cache_event,
        )
        attn_output, _ = self.dense(context_layer)
        return attn_output


class GLMMLP(nn.Module):
    """MLP.

    MLP will take the input with h hidden state, project it to 4*h
    hidden dimension, perform a nonlinear transformation, and project the
    state back into h hidden dimension.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.add_bias = config.add_bias_linear

        # Project to 4h.
        self.dense_h_to_4h = MergedColumnParallelLinear(
            config.hidden_size,
            [config.ffn_hidden_size] * 2,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

        self.activation_func = SiluAndMul()
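        # dense_h_to_4h fuses the gate and up projections, so its output has
        # 2 * ffn_hidden_size features; SiluAndMul splits it in half and
        # computes silu(x[..., :d]) * x[..., d:], leaving ffn_hidden_size
        # features for dense_4h_to_h below.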

        # Project back to h.
        self.dense_4h_to_h = RowParallelLinear(
            config.ffn_hidden_size,
            config.hidden_size,
            bias=config.add_bias_linear,
            linear_method=linear_method,
        )

    def forward(self, hidden_states):
        # [s, b, 4hp]
        intermediate_parallel, _ = self.dense_h_to_4h(hidden_states)
        intermediate_parallel = self.activation_func(intermediate_parallel)
        # [s, b, h]
        output, _ = self.dense_4h_to_h(intermediate_parallel)
        return output


class GLMBlock(nn.Module):
    """A single transformer layer.

    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.
    """

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.apply_residual_connection_post_layernorm = (
            config.apply_residual_connection_post_layernorm)

        self.fp32_residual_connection = config.fp32_residual_connection

        layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
        # Layernorm on the input data.
        self.input_layernorm = layer_norm_func(config.hidden_size,
                                               eps=config.layernorm_epsilon)

        # Self attention.
        self.self_attention = GLMAttention(config, linear_method)
        self.hidden_dropout = config.hidden_dropout

        # Layernorm on the attention output
        self.post_attention_layernorm = layer_norm_func(
            config.hidden_size, eps=config.layernorm_epsilon)

        # MLP
        self.mlp = GLMMLP(config, linear_method)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        # hidden_states: [num_tokens, h]
        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output = self.self_attention(
            hidden_states=layernorm_output,
            position_ids=position_ids,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
            cache_event=cache_event,
        )

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        layernorm_input = residual + attention_output

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        output = self.mlp(layernorm_output) + residual

        return output


class GLMTransformer(nn.Module):
    """Transformer class."""

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.post_layer_norm = config.post_layer_norm

        # Number of layers.
        self.num_layers = config.num_layers

        # Transformer layers.
        self.layers = nn.ModuleList(
            [GLMBlock(config, linear_method) for i in range(self.num_layers)])

        if self.post_layer_norm:
            layer_norm_func = RMSNorm if config.rmsnorm else LayerNorm
            # Final layer norm before output.
            self.final_layernorm = layer_norm_func(
                config.hidden_size, eps=config.layernorm_epsilon)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> torch.Tensor:
        for i in range(self.num_layers):
            cache_event = None if cache_events is None else cache_events[i]
            layer = self.layers[i]
            hidden_states = layer(
                hidden_states=hidden_states,
                position_ids=position_ids,
                kv_cache=kv_caches[i],
                input_metadata=input_metadata,
                cache_event=cache_event,
            )
        # Final layer norm.
        if self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)

        return hidden_states


class ChatGLMModel(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()

        self.embedding = VocabParallelEmbedding(config.padded_vocab_size,
                                                config.hidden_size)

        self.num_layers = config.num_layers
        self.multi_query_group_num = config.multi_query_group_num
        self.kv_channels = config.kv_channels
        self.encoder = GLMTransformer(config, linear_method)

        self.output_layer = ParallelLMHead(config.padded_vocab_size,
                                           config.hidden_size)
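        # ChatGLM checkpoints keep a separate output projection (not tied to
        # the input embedding); ChatGLMForCausalLM reuses this weight when
        # computing logits in the sampler.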

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ):
        inputs_embeds = self.embedding(input_ids)

        # Run encoder.
        hidden_states = self.encoder(
            hidden_states=inputs_embeds,
            position_ids=position_ids,
            kv_caches=kv_caches,
            input_metadata=input_metadata,
            cache_events=cache_events,
        )

        return hidden_states


class ChatGLMForCausalLM(nn.Module):

    def __init__(
        self,
        config: ChatGLMConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config: ChatGLMConfig = config
        self.linear_method = linear_method
        self.transformer = ChatGLMModel(config, linear_method)
        self.lm_head_weight = self.transformer.output_layer.weight
        self.sampler = Sampler(config.padded_vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
        cache_events: Optional[List[torch.cuda.Event]],
    ) -> SamplerOutput:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         input_metadata, cache_events)
        next_tokens = self.sampler(self.lm_head_weight, hidden_states,
                                   input_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if "rotary_pos_emb.inv_freq" in name:
                continue
            if "word_embeddings" in name:
                name = name.replace(".word_embeddings", "")
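                # e.g. (illustrative) "transformer.embedding.word_embeddings.weight"
                # in the HF checkpoint maps to "transformer.embedding.weight" here.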
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
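

# Example usage (illustrative sketch, not part of this module): the model is
# picked up through vLLM's model registry, so inference goes through the
# top-level API, e.g.:
#
#   from vllm import LLM, SamplingParams
#
#   llm = LLM(model="THUDM/chatglm2-6b", trust_remote_code=True)
#   outputs = llm.generate(["Hello, ChatGLM!"],
#                          SamplingParams(temperature=0.8, max_tokens=64))
#   print(outputs[0].outputs[0].text)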