from ctypes import POINTER, c_int, c_uint, c_void_p, byref
import os
from pathlib import Path
import safetensors
import sys
import time
import json
import asyncio

from libinfinicore_infer import (
    JiugeMeta,
    JiugeWeights,
    KVCache,
    DataType,
    DeviceType,
    create_jiuge_model,
    destroy_jiuge_model,
    create_kv_cache,
    drop_kv_cache,
    infer_batch,
)
import torch
import transformers

torch.set_default_device("cpu")

class LlamaWeightsNaming:
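    """Tensor-name mapping for HuggingFace Llama-style checkpoints."""
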
    def input_embd(self):
        return "model.embed_tokens.weight"

    def output_norm(self):
        return "model.norm.weight"

    def output_embd(self):
        return "lm_head.weight"

    def attn_norm(self, i):
        return f"model.layers.{i}.input_layernorm.weight"

    def attn_q(self, i):
        return f"model.layers.{i}.self_attn.q_proj.weight"

    def attn_k(self, i):
        return f"model.layers.{i}.self_attn.k_proj.weight"

    def attn_v(self, i):
        return f"model.layers.{i}.self_attn.v_proj.weight"

    def attn_o(self, i):
        return f"model.layers.{i}.self_attn.o_proj.weight"

    def attn_q_b(self, i):
        return f"model.layers.{i}.self_attn.q_proj.bias"

    def attn_k_b(self, i):
        return f"model.layers.{i}.self_attn.k_proj.bias"

    def attn_v_b(self, i):
        return f"model.layers.{i}.self_attn.v_proj.bias"

    def ffn_norm(self, i):
        return f"model.layers.{i}.post_attention_layernorm.weight"

    def gate(self, i):
        return f"model.layers.{i}.mlp.gate_proj.weight"

    def up(self, i):
        return f"model.layers.{i}.mlp.up_proj.weight"

    def down(self, i):
        return f"model.layers.{i}.mlp.down_proj.weight"

    @staticmethod
    def match(state_dict):
        return (
            "model.norm.weight" in state_dict
            and "model.layers.0.self_attn.q_proj.weight" in state_dict
        )


class JiugeMetaFromLlama(JiugeMeta):
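    """Builds the C-side JiugeMeta model description from a HuggingFace
    Llama-style config.json dict."""
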
    def __init__(self, config, dtype=torch.float16):
        if dtype == torch.float16:
            dt_ = DataType.INFINI_DTYPE_F16
        elif dtype == torch.float32:
            dt_ = DataType.INFINI_DTYPE_F32
        else:
            # Fall back to FP16 for any other torch dtype
            dt_ = DataType.INFINI_DTYPE_F16
        super().__init__(
            dt_logits=dt_,
            nlayer=config["num_hidden_layers"],
            d=config["hidden_size"],
            nh=config["num_attention_heads"],
            nkvh=(
                config["num_key_value_heads"]
                if "num_key_value_heads" in config
                else config["num_attention_heads"]
            ),
            dh=config["hidden_size"] // config["num_attention_heads"],
            di=config["intermediate_size"],
            dctx=config["max_position_embeddings"],
            dvoc=config["vocab_size"],
            epsilon=config["rms_norm_eps"],
            theta=(config["rope_theta"] if "rope_theta" in config else 100000.0),
            end_token=2,
        )
        self.torch_dtype_logits = dtype


class JiugeWeightsImpl(JiugeWeights):
    def __init__(
        self,
        meta,
        naming,
        state_dict,
        torch_dt_mat=torch.float16,
        torch_dt_norm=torch.float32,
        ndev=1,
        transpose_weight=True,
    ):
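        """Pack a HuggingFace state dict into the C-side JiugeWeights layout.

        The converted torch tensors are kept as attributes on this object:
        only their raw data_ptr() addresses are handed to the C runtime, so
        the Python references must stay alive for the model's lifetime.
        """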
        nlayer = meta.nlayer
        nh = meta.nh
        nkvh = meta.nkvh
        dh = meta.dh
        d = meta.d
        di = meta.di
        assert nh % nkvh == 0
        assert nh % ndev == 0
        assert nkvh % ndev == 0
        assert di % ndev == 0
        torch_dt_logits = meta.torch_dtype_logits
        if torch_dt_mat == torch.float16:
            self.dt_mat = DataType.INFINI_DTYPE_F16
        elif torch_dt_mat == torch.float32:
            self.dt_mat = DataType.INFINI_DTYPE_F32
        else:
            raise ValueError("Unsupported proj weight data type")
        if torch_dt_norm == torch.float16:
            self.dt_norm = DataType.INFINI_DTYPE_F16
        elif torch_dt_norm == torch.float32:
            self.dt_norm = DataType.INFINI_DTYPE_F32
        else:
            raise ValueError("Unsupported norm weight data type")

        # If one embedding matrix is absent (tied embeddings), fall back to
        # the other one.
        input_embd_naming = (
            naming.input_embd()
            if naming.input_embd() in state_dict
            else naming.output_embd()
        )
        output_embd_naming = (
            naming.output_embd()
            if naming.output_embd() in state_dict
            else naming.input_embd()
        )
        self.transpose_linear_weights = 1 if transpose_weight else 0
        self.nlayer = nlayer
        self.input_embd_tensor = state_dict[input_embd_naming].to(torch_dt_logits)
        self.input_embd = self.input_embd_tensor.data_ptr()
        self.output_norm_tensor = state_dict[naming.output_norm()].to(torch_dt_norm)
        self.output_norm = self.output_norm_tensor.data_ptr()
        self.output_embd_tensor = state_dict[output_embd_naming].to(torch_dt_mat)
        if not transpose_weight:
            self.output_embd_tensor = (
                self.output_embd_tensor.transpose(0, 1).contiguous()
            )
        self.output_embd = self.output_embd_tensor.data_ptr()

        self.attn_norm_tensors = [
            state_dict[naming.attn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.attn_norm_ptrs = [
            self.attn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.attn_norm = (c_void_p * nlayer)(*self.attn_norm_ptrs)

        def qkv_slices(_i):
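            """Split layer `_i`'s Q/K/V weights across `ndev` devices.

            Q and K rows are regrouped per head from (2, dh//2) to
            (dh//2, 2), apparently to match the runtime's rotary-embedding
            layout; V is only reshaped to the matching shape. Each device
            receives its contiguous [Q, K, V] head slice, which the caller
            concatenates.
            """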
            _Q = (
                state_dict[naming.attn_q(_i)]
                .reshape([nh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _K = (
                state_dict[naming.attn_k(_i)]
                .reshape([nkvh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _V = state_dict[naming.attn_v(_i)].reshape([nkvh, dh // 2, 2, d])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_Q[_idev * _nh : (_idev + 1) * _nh, :, :, :])
                _result.append(_K[_idev * _nkvh : (_idev + 1) * _nkvh, :, :, :])
                _result.append(_V[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        self.qkv_tensor = [
            torch.concat(qkv_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.qkv_tensor[i] = (
                    self.qkv_tensor[i]
                    .reshape(ndev, (nh + 2 * nkvh) // ndev * dh, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.qkv_tensor_ptrs = [self.qkv_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_qkv = (c_void_p * nlayer)(*self.qkv_tensor_ptrs)

        def qkv_b_slices(_i):
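            """Per-device split of the Q/K/V projection biases, using the
            same rotary regrouping as qkv_slices; each slice is flattened
            before concatenation."""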
            _QB = (
                state_dict[naming.attn_q_b(_i)]
                .reshape([nh, 2, dh // 2])
                .transpose(1, 2)
            )
            _KB = (
                state_dict[naming.attn_k_b(_i)]
                .reshape([nkvh, 2, dh // 2])
                .transpose(1, 2)
            )
            _VB = state_dict[naming.attn_v_b(_i)].reshape([nkvh, dh // 2, 2])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_QB[_idev * _nh : (_idev + 1) * _nh, :, :].flatten())
                _result.append(_KB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
                _result.append(_VB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
            return _result

        if naming.attn_q_b(0) in state_dict:
            self.qkv_b_tensors = [
                torch.concat(qkv_b_slices(i)).to(torch_dt_logits) for i in range(nlayer)
            ]
            self.qkv_b_tensor_ptrs = [
                self.qkv_b_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_qkv_b = (c_void_p * nlayer)(*self.qkv_b_tensor_ptrs)
        else:
            self.attn_qkv_b = None

        self.attn_o_tensor = [
            state_dict[naming.attn_o(i)]
            .to(torch_dt_mat)
            .reshape([d, ndev, nh // ndev * dh])
            .transpose(0, 1)
            .contiguous()
            if transpose_weight
            else state_dict[naming.attn_o(i)]
            .transpose(0, 1)
            .to(torch_dt_mat)
            .contiguous()
            for i in range(nlayer)
        ]
        self.attn_o_ptrs = [self.attn_o_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_o = (c_void_p * nlayer)(*self.attn_o_ptrs)

        self.ffn_norm_tensors = [
            state_dict[naming.ffn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.ffn_norm_ptrs = [
            self.ffn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.ffn_norm = (c_void_p * nlayer)(*self.ffn_norm_ptrs)

        def gate_up_slices(_i):
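            """Shard the FFN gate and up projection weights along the
            intermediate dimension, yielding a [gate, up] pair per device."""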
            _result = []
            _di = di // ndev
            for _idev in range(ndev):
                _start = _idev * _di
                _end = (_idev + 1) * _di
                _result.append(state_dict[naming.gate(_i)][_start:_end, :])
                _result.append(state_dict[naming.up(_i)][_start:_end, :])
            return _result

        self.gate_up_tensors = [
            torch.concat(gate_up_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.gate_up_tensors[i] = (
                    self.gate_up_tensors[i]
                    .reshape(ndev, 2 * di // ndev, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.gate_up_ptrs = [self.gate_up_tensors[i].data_ptr() for i in range(nlayer)]
        self.ffn_gate_up = (c_void_p * nlayer)(*self.gate_up_ptrs)

        self.ffn_down_tensor = [
            state_dict[naming.down(i)]
            .to(torch_dt_mat)
            .reshape([d, ndev, di // ndev])
            .transpose(0, 1)
            .contiguous()
            if transpose_weight
            else state_dict[naming.down(i)]
            .transpose(0, 1)
            .to(torch_dt_mat)
            .contiguous()
            for i in range(nlayer)
        ]
        self.ffn_down_ptrs = [self.ffn_down_tensor[i].data_ptr() for i in range(nlayer)]
        self.ffn_down = (c_void_p * nlayer)(*self.ffn_down_ptrs)


class JiugeForCauslLM:
    def __init__(self, model_dir_path, device=DeviceType.DEVICE_TYPE_CPU, ndev=1):
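        """Load config, tokenizer and weights from `model_dir_path`, convert
        them into the C-side layout, and create the inference instance on
        `ndev` devices of the given DeviceType."""
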
        def load_all_safetensors_from_dir(dir_path_: str):
            tensors_ = {}
            dir_path_ = Path(dir_path_)
            for file in sorted(dir_path_.glob("*.safetensors")):
                data_ = safetensors.safe_open(file, "pt")
                for name_ in data_.keys():
                    tensors_[name_] = data_.get_tensor(name_)
            return tensors_
        
        print("Loading model weights to host...")
        load_start_time = time.time()

        with open(os.path.join(model_dir_path, "config.json"), "r") as f:
            config = json.load(f)
            self.config = config
        eos_token_id = self.config["eos_token_id"]
        self.eos_token_id = (
            [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        )
        # y = xW is faster than y = xW^T on Ascend
        transpose_weight = device != DeviceType.DEVICE_TYPE_ASCEND
        if "llama" == config["model_type"]:
            model = (
                transformers.LlamaForCausalLM.from_pretrained(model_dir_path)
                .cpu()
                .half()
            )
            self.meta = JiugeMetaFromLlama(config)
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir_path)
            self.weights = JiugeWeightsImpl(
                self.meta,
                LlamaWeightsNaming(),
                model.state_dict(),
                ndev=ndev,
                transpose_weight=transpose_weight,
            )
        elif "fm9g" == config["model_type"]:
            if any(
                file.suffix == ".safetensors"
                for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "fm9g7b" == config["model_type"]:
            state_dict = torch.load(
                os.path.join(model_dir_path, "pytorch_model.bin"),
                weights_only=True,
                map_location="cpu",
            )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "qwen2" == config["model_type"]:
            state_dict = load_all_safetensors_from_dir(model_dir_path)
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path
                )
            else:
                raise ValueError("Unsupported weight naming")
        else:
            raise ValueError("Unsupported model architecture")

        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")
        
        print(f"Creating model on {ndev} devices...")
        load_start_time = time.time()
        dev_ids = (c_int * ndev)(*range(ndev))
        self.model_instance = create_jiuge_model(
            byref(self.meta),
            byref(self.weights),
            device,
            ndev,
            dev_ids,
        )
        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")
        
    def create_kv_cache(self):
        return create_kv_cache(self.model_instance)

    def drop_kv_cache(self, kv_cache):
        drop_kv_cache(self.model_instance, kv_cache)

    def chat(self, request, kv_cache):
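        """Run one blocking chat completion against the given KV cache.

        `request` is a dict with OpenAI-style fields: "messages", plus
        optional "temperature", "top_k", "top_p" and "max_tokens". Returns
        the generated text once EOS or the token limit is reached.
        """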
        messages = request.get("messages", [])
        temperature = request.get("temperature", 1.0)
        topk = request.get("top_k", 1)
        topp = request.get("top_p", 1.0)
        max_tokens = request.get("max_tokens", 512)
        input_content = self.tokenizer.apply_chat_template(
            conversation=messages,
            add_generation_prompt=True,
            tokenize=False,
        )

        tokens = self.tokenizer.encode(input_content)
        ntok = len(tokens)
        nreq = 1
        output_content = ""
        tokens = (c_uint * ntok)(*tokens)
        req_lens = (c_uint * nreq)(*[ntok])
        req_pos = (c_uint * nreq)(*[0])
        kv_caches = (POINTER(KVCache) * nreq)(*[kv_cache])
        ans = (c_uint * nreq)()

        steps = 0
        for step_i in range(max_tokens):
            infer_batch(
                self.model_instance,
                tokens,
                ntok,
                req_lens,
                nreq,
                req_pos,
                kv_caches,
                ans,
                temperature,
                topk,
                topp,
            )
            steps += 1
            output_tokens = list(ans)
            # Decode the single sampled token id directly via the underlying
            # tokenizer, mapping the SentencePiece word-boundary marker and
            # the newline byte token back to plain text.
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )
            output_content += output_str

            if output_tokens[0] in self.eos_token_id:
                break
            req_pos[0] = req_pos[0] + ntok
            ntok = 1
            tokens = (c_uint * ntok)(*output_tokens)
            req_lens = (c_uint * nreq)(*[ntok])

        return output_content
    
    async def chat_stream_async(self, request, kv_cache):
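        """Async-generator variant of chat(): yields one decoded token at a
        time and yields control to the event loop between steps."""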
        messages = request.get("messages", [])
        temperature = request.get("temperature", 1.0)
        topk = request.get("top_k", 1)
        topp = request.get("top_p", 1.0)
        max_tokens = request.get("max_tokens", 512)

        input_content = self.tokenizer.apply_chat_template(
            conversation=messages,
            add_generation_prompt=True,
            tokenize=False,
        )

        tokens = self.tokenizer.encode(input_content)
        ntok = len(tokens)
        nreq = 1
        tokens = (c_uint * ntok)(*tokens)
        req_lens = (c_uint * nreq)(*[ntok])
        req_pos = (c_uint * nreq)(*[0])
        kv_caches = (POINTER(KVCache) * nreq)(*[kv_cache])
        ans = (c_uint * nreq)()

        for step_i in range(max_tokens):
            infer_batch(
                self.model_instance,
                tokens,
                ntok,
                req_lens,
                nreq,
                req_pos,
                kv_caches,
                ans,
                temperature,
                topk,
                topp,
            )

            output_tokens = list(ans)
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )

            yield output_str  # Yield each token as it's produced
            await asyncio.sleep(0)  # Let event loop breathe

            if output_tokens[0] in self.eos_token_id:
                break

            req_pos[0] += ntok
            ntok = 1
            tokens = (c_uint * ntok)(*output_tokens)
            req_lens = (c_uint * nreq)(*[ntok])

    def generate(self, input_content, max_steps, topp=1.0, topk=1, temperature=1.0):
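        """One-shot generation helper: wraps `input_content` in a single user
        message, streams tokens to stdout, and reports the average decode
        time per step (the prefill step is excluded). Returns
        (output_content, avg_time)."""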
        kv_cache = create_kv_cache(self.model_instance)
        input_content = self.tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": input_content}],
            add_generation_prompt=True,
            tokenize=False,
        )
        print(input_content, end="", flush=True)
        tokens = self.tokenizer.encode(input_content)
        ntok = len(tokens)
        nreq = 1
        output_content = ""
        tokens = (c_uint * ntok)(*tokens)
        req_lens = (c_uint * nreq)(*[ntok])
        req_pos = (c_uint * nreq)(*[0])
        kv_caches = (POINTER(KVCache) * nreq)(*[kv_cache])
        ans = (c_uint * nreq)()

        steps = 0
        total_time = 0

        for step_i in range(max_steps):
            start_time = time.time()
            infer_batch(
                self.model_instance,
                tokens,
                ntok,
                req_lens,
                nreq,
                req_pos,
                kv_caches,
                ans,
                temperature,
                topk,
                topp,
            )
            steps += 1
            output_tokens = list(ans)
            end_time = time.time()
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )
            output_content += output_str
            print(output_str, end="", flush=True)
            if output_tokens[0] in self.eos_token_id:
                break
            req_pos[0] = req_pos[0] + ntok
            ntok = 1
            tokens = (c_uint * ntok)(*output_tokens)
            req_lens = (c_uint * nreq)(*[ntok])
            
            if step_i > 0:
                total_time += end_time - start_time

        print("\n")
        # The timing loop skips the prefill step, so guard against a
        # single-step run to avoid division by zero.
        avg_time = total_time * 1000 / max(steps - 1, 1)
        print(f"Time per step: {avg_time:.3f}ms")
        for kv_cache in kv_caches:
            drop_kv_cache(self.model_instance, kv_cache)
        return output_content, avg_time
    
    def destroy_model_instance(self):
        destroy_jiuge_model(self.model_instance)
        print("Model destroyed")


def test():
    if len(sys.argv) < 3:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia | --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)
    model_path = sys.argv[2]
    device_type = DeviceType.DEVICE_TYPE_CPU
    if sys.argv[1] == "--cpu":
        device_type = DeviceType.DEVICE_TYPE_CPU
    elif sys.argv[1] == "--nvidia":
        device_type = DeviceType.DEVICE_TYPE_NVIDIA
    elif sys.argv[1] == "--cambricon":
        device_type = DeviceType.DEVICE_TYPE_CAMBRICON
    elif sys.argv[1] == "--ascend":
        device_type = DeviceType.DEVICE_TYPE_ASCEND
    elif sys.argv[1] == "--metax":
        device_type = DeviceType.DEVICE_TYPE_METAX
    elif sys.argv[1] == "--moore":
        device_type = DeviceType.DEVICE_TYPE_MOORE
    else:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia | --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)

    ndev = int(sys.argv[3]) if len(sys.argv) > 3 else 1
    model = JiugeForCauslLM(model_path, device_type, ndev)
    model.generate("山东最高的山是?", 500)
    model.destroy_model_instance()


if __name__ == "__main__":
    test()