from ctypes import POINTER, c_int, c_uint, c_void_p, byref
from pathlib import Path
import safetensors
import sys
import time

from libinfinicore_infer import (
    JiugeMeta,
    JiugeWeights,
    KVCache,
    DataType,
    DeviceType,
    create_jiuge_model,
    create_kv_cache,
    drop_kv_cache,
    infer_batch,
)
import torch
import transformers


class LlamaWeightsNaming:
    def input_embd(self):
        return "model.embed_tokens.weight"

    def output_norm(self):
        return "model.norm.weight"

    def output_embd(self):
        return "lm_head.weight"

    def attn_norm(self, i):
        return f"model.layers.{i}.input_layernorm.weight"

    def attn_q(self, i):
        return f"model.layers.{i}.self_attn.q_proj.weight"

    def attn_k(self, i):
        return f"model.layers.{i}.self_attn.k_proj.weight"

    def attn_v(self, i):
        return f"model.layers.{i}.self_attn.v_proj.weight"

    def attn_o(self, i):
        return f"model.layers.{i}.self_attn.o_proj.weight"

    def attn_q_b(self, i):
        return f"model.layers.{i}.self_attn.q_proj.bias"

    def attn_k_b(self, i):
        return f"model.layers.{i}.self_attn.k_proj.bias"

    def attn_v_b(self, i):
        return f"model.layers.{i}.self_attn.v_proj.bias"

    def ffn_norm(self, i):
        return f"model.layers.{i}.post_attention_layernorm.weight"

    def gate(self, i):
        return f"model.layers.{i}.mlp.gate_proj.weight"

    def up(self, i):
        return f"model.layers.{i}.mlp.up_proj.weight"

    def down(self, i):
        return f"model.layers.{i}.mlp.down_proj.weight"

    @staticmethod
    def match(state_dict):
        return (
            "model.norm.weight" in state_dict
            and "model.layers.0.self_attn.q_proj.weight" in state_dict
        )
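
# Usage sketch (hypothetical values): the naming scheme maps a layer index
# to the matching Hugging Face checkpoint key, e.g.
#   LlamaWeightsNaming().attn_q(3) -> "model.layers.3.self_attn.q_proj.weight"
# and match() only probes two marker keys to detect Llama-style checkpoints,
# rather than validating the whole state_dict.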


class JiugeMetaFromLlama(JiugeMeta):
    def __init__(self, config, infini_dtype):
        super().__init__(
            dt_logits=infini_dtype,
            dt_norm=infini_dtype,
            dt_mat=infini_dtype,
            nlayer=config.num_hidden_layers,
            d=config.hidden_size,
            nh=config.num_attention_heads,
            nkvh=(
                config.num_key_value_heads
                if config.num_key_value_heads
                else config.num_attention_heads
            ),
            dh=config.hidden_size // config.num_attention_heads,
            di=config.intermediate_size,
            dctx=config.max_position_embeddings,
            dvoc=config.vocab_size,
            epsilon=config.rms_norm_eps,
            theta=config.rope_theta,
            end_token=2,
        )
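
# For orientation, with a hypothetical Llama-7B-style config (hidden_size
# d=4096, nh=32 heads, nkvh=32 kv heads, nlayer=32, di=11008), the derived
# head dim is dh = 4096 // 32 = 128. Note that end_token=2 hard-codes the
# Llama convention that </s> has token id 2.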


class JiugeWeightsImpl(JiugeWeights):
    def __init__(self, meta, naming, state_dict, ndev=1):
        nlayer = meta.nlayer
        nh = meta.nh
        nkvh = meta.nkvh
        dh = meta.dh
        d = meta.d
        di = meta.di
        assert nh % nkvh == 0
        assert nh % ndev == 0
        assert nkvh % ndev == 0
        assert di % ndev == 0

        self.nlayer = nlayer
        self.input_embd_tensor = state_dict[naming.input_embd()]
        self.input_embd = self.input_embd_tensor.data_ptr()
        self.output_norm_tensor = state_dict[naming.output_norm()]
        self.output_norm = self.output_norm_tensor.data_ptr()
        self.output_embd_tensor = state_dict[naming.output_embd()]
        self.output_embd = self.output_embd_tensor.data_ptr()

        self.attn_norm_tensors = [
            state_dict[naming.attn_norm(i)] for i in range(nlayer)
        ]
        self.attn_norm_ptrs = [
            self.attn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.attn_norm = (c_void_p * nlayer)(*self.attn_norm_ptrs)
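        # Pattern used for every weight below: keep the torch tensors alive
        # as attributes (so their storage is not freed), then hand the C side
        # a (c_void_p * nlayer) array of raw data pointers.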

        def qkv_slices(_i):
            _Q = (
                state_dict[naming.attn_q(_i)]
                .reshape([nh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _K = (
                state_dict[naming.attn_k(_i)]
                .reshape([nkvh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _V = state_dict[naming.attn_v(_i)].reshape([nkvh, dh // 2, 2, d])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_Q[_idev * _nh : (_idev + 1) * _nh, :, :, :])
                _result.append(_K[_idev * _nkvh : (_idev + 1) * _nkvh, :, :, :])
                _result.append(_V[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        self.qkv_tensor = [torch.concat(qkv_slices(i)) for i in range(nlayer)]
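        # Each per-layer QKV tensor has shape [nh + 2 * nkvh, dh // 2, 2, d],
        # laid out device-major ([Q_dev0, K_dev0, V_dev0, Q_dev1, ...]) so
        # every device's shard is contiguous. The reshape-and-transpose on
        # Q/K above converts the checkpoint's half-split RoPE layout into an
        # interleaved pair layout (assumed to be what the C kernel expects).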
        self.qkv_tensor_ptrs = [self.qkv_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_qkv = (c_void_p * nlayer)(*self.qkv_tensor_ptrs)

        def qkv_b_slices(_i):
            _QB = (
                state_dict[naming.attn_q_b(_i)]
                .reshape([nh, 2, dh // 2])
                .transpose(1, 2)
            )
            _KB = (
                state_dict[naming.attn_k_b(_i)]
                .reshape([nkvh, 2, dh // 2])
                .transpose(1, 2)
            )
            _VB = state_dict[naming.attn_v_b(_i)].reshape([nkvh, dh // 2, 2])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_QB[_idev * _nh : (_idev + 1) * _nh, :, :])
                _result.append(_KB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
                _result.append(_VB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        if naming.attn_q_b(0) in state_dict:
            self.qkv_b_tensors = [torch.concat(qkv_b_slices(i)) for i in range(nlayer)]
            self.qkv_b_tensor_ptrs = [
                self.qkv_b_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_qkv_b = (c_void_p * nlayer)(*self.qkv_b_tensor_ptrs)
        else:
            self.attn_qkv_b = None
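            # QKV biases are optional: Llama-style checkpoints omit them,
            # while some checkpoints (e.g. Qwen-style) include q/k/v biases.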

        self.attn_o_tensor = [
            state_dict[naming.attn_o(i)]
            .reshape([d, ndev, nh // ndev * dh])
            .transpose(0, 1)
            .contiguous()
            for i in range(nlayer)
        ]
        self.attn_o_ptrs = [self.attn_o_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_o = (c_void_p * nlayer)(*self.attn_o_ptrs)

        self.ffn_norm_tensors = [state_dict[naming.ffn_norm(i)] for i in range(nlayer)]
        self.ffn_norm_ptrs = [
            self.ffn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.ffn_norm = (c_void_p * nlayer)(*self.ffn_norm_ptrs)

        def gate_up_slices(_i):
            _result = []
            _di = di // ndev
            for _idev in range(ndev):
                _start = _idev * _di
                _end = (_idev + 1) * _di
                _result.append(state_dict[naming.gate(_i)][_start:_end, :])
                _result.append(state_dict[naming.up(_i)][_start:_end, :])
            return _result

        self.gate_up_tensors = [torch.concat(gate_up_slices(i)) for i in range(nlayer)]
        self.gate_up_ptrs = [self.gate_up_tensors[i].data_ptr() for i in range(nlayer)]
        self.ffn_gate_up = (c_void_p * nlayer)(*self.gate_up_ptrs)
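        # Each per-layer gate/up tensor has shape [2 * di, d], ordered
        # device-major as [gate_dev0, up_dev0, gate_dev1, up_dev1, ...] so
        # each device receives a contiguous [2 * di // ndev, d] shard.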

        self.ffn_down_tensor = [
            state_dict[naming.down(i)]
            .reshape([d, ndev, di // ndev])
            .transpose(0, 1)
            .contiguous()
            for i in range(nlayer)
        ]
        self.ffn_down_ptrs = [self.ffn_down_tensor[i].data_ptr() for i in range(nlayer)]
        self.ffn_down = (c_void_p * nlayer)(*self.ffn_down_ptrs)


class JiugeForCausalLM:
    def __init__(self, model_dir_path, device=DeviceType.DEVICE_TYPE_CPU, ndev=1):
        def load_all_safetensors_from_dir(dir_path_: str, torch_type=torch.float16):
            # Merge all *.safetensors shards (in sorted filename order) into
            # one dict, casting every tensor to torch_type (float16 default).
            tensors_ = {}
            dir_path_ = Path(dir_path_)
            for file in sorted(dir_path_.glob("*.safetensors")):
                data_ = safetensors.safe_open(file, "pt")
                for name_ in data_.keys():
                    tensors_[name_] = data_.get_tensor(name_).to(torch_type)
            return tensors_

        config = transformers.AutoConfig.from_pretrained(
            model_dir_path, trust_remote_code=True
        )
        if "llama" == config.model_type:
            model = transformers.LlamaForCausalLM.from_pretrained(model_dir_path).to(
                torch.float16
            )
            self.meta = JiugeMetaFromLlama(model.config, DataType.INFINI_DTYPE_F16)
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir_path)
            self.weights = JiugeWeightsImpl(
                self.meta, LlamaWeightsNaming(), model.state_dict(), ndev=ndev
            )
        elif "fm9g" == config.model_type:
            state_dict = load_all_safetensors_from_dir(model_dir_path)
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, DataType.INFINI_DTYPE_F16)
                self.weights = JiugeWeightsImpl(
                    self.meta, LlamaWeightsNaming(), state_dict, ndev=ndev
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
        else:
            raise ValueError("Unsupported model architecture")
        dev_ids = (c_int * ndev)(*range(ndev))
        self.model_instance = create_jiuge_model(
            byref(self.meta),
            byref(self.weights),
            device,
            ndev,
            dev_ids,
        )
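        # dev_ids enumerates physical devices 0..ndev-1. The C side receives
        # the meta/weights structs by reference, so the weight tensors held
        # on self.weights must outlive the model instance.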

    def infer(self, input_list, topp=1.0, topk=1, temperature=1.0):
        # TODO: batched inference is not implemented yet; generate() below
        # covers the single-request decoding loop.
        pass

    def generate(self, input_content, max_steps, topp=1.0, topk=1, temperature=1.0):
        print(input_content, end="", flush=True)
        kv_cache = create_kv_cache(self.model_instance)
        tokens = self.tokenizer.encode(input_content)
        ntok = len(tokens)
        nreq = 1
        output_content = ""
        tokens = (c_uint * ntok)(*tokens)
        req_lens = (c_uint * nreq)(*[ntok])
        req_pos = (c_uint * nreq)(*[0])
        kv_caches = (POINTER(KVCache) * nreq)(*[kv_cache])
        ans = (c_uint * nreq)()

        steps = 0
        start_time = time.time()
        for _ in range(max_steps):
            infer_batch(
                self.model_instance,
                tokens,
                ntok,
                req_lens,
                nreq,
                req_pos,
                kv_caches,
                ans,
                temperature,
                topk,
                topp,
            )
            steps += 1
            output_tokens = list(ans)
            # Decode through the backing tokenizers object so SentencePiece
            # pieces stay visible, then map the word-boundary marker and the
            # newline byte-piece back to plain text.
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )
            if output_str.endswith("</s>"):
                break
            output_content += output_str
            print(output_str, end="", flush=True)
            req_pos[0] = req_pos[0] + ntok
            ntok = 1
            tokens = (c_uint * ntok)(*output_tokens)
            req_lens = (c_uint * nreq)(*[ntok])

        print("\n")
        end_time = time.time()
        avg_time = (end_time - start_time) * 1000 / steps
        print(f"Time per step: {avg_time:.3f}ms")
        for kv_cache in kv_caches:
            drop_kv_cache(self.model_instance, kv_cache)
        return output_content, avg_time
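
    # Programmatic usage sketch (hypothetical path):
    #   model = JiugeForCausalLM("/path/to/model_dir",
    #                            DeviceType.DEVICE_TYPE_CPU, ndev=1)
    #   text, ms_per_step = model.generate("Once upon a time,", max_steps=100)
    # topk=1 makes decoding greedy; raise topk/topp/temperature to sample.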


def test():
    if len(sys.argv) < 3:
        print(
            "Usage: python test_llama.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)
    model_path = sys.argv[2]
    device_type = DeviceType.DEVICE_TYPE_CPU
    if sys.argv[1] == "--cpu":
        device_type = DeviceType.DEVICE_TYPE_CPU
    elif sys.argv[1] == "--nvidia":
        device_type = DeviceType.DEVICE_TYPE_NVIDIA
    elif sys.argv[1] == "--cambricon":
        device_type = DeviceType.DEVICE_TYPE_CAMBRICON
    elif sys.argv[1] == "--ascend":
        device_type = DeviceType.DEVICE_TYPE_ASCEND
    elif sys.argv[1] == "--metax":
        device_type = DeviceType.DEVICE_TYPE_METAX
    elif sys.argv[1] == "--moore":
        device_type = DeviceType.DEVICE_TYPE_MOORE
    else:
        print(
            "Usage: python test_llama.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)

    ndev = int(sys.argv[3]) if len(sys.argv) > 3 else 1
    model = JiugeForCausalLM(model_path, device_type, ndev)
    model.generate("Once upon a time,", 100)


if __name__ == "__main__":
    test()