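"""Jiuge inference driver.

Loads Llama-family checkpoints (Llama, FM9G, MiniCPM, Qwen2/Qwen3) into the
libinfinicore_infer C runtime and runs batched generation or perplexity
evaluation on CPU or various accelerators.

Usage sketch (the model path is illustrative):

    python jiuge.py --cpu /path/to/model_dir [n_device]
"""
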
from typing import List, Sequence
import math
import os
from pathlib import Path
import safetensors
import sys
import time
import json
import torch
import transformers

from libinfinicore_infer import (
    JiugeModel,
    JiugeMetaCStruct,
    JiugeWeightsCStruct,
    DataType,
    DeviceType,
    KVCacheCStruct,
)
from infer_task import InferTask, KVCache

from ctypes import POINTER, c_float, c_int, c_uint, c_void_p, byref

torch.set_default_device("cpu")


class LlamaWeightsNaming:
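    """Maps weight roles to Hugging Face Llama-style state-dict key names."""
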
    def input_embd(self):
        return "model.embed_tokens.weight"

    def output_norm(self):
        return "model.norm.weight"

    def output_embd(self):
        return "lm_head.weight"

    def attn_norm(self, i):
        return f"model.layers.{i}.input_layernorm.weight"

    def attn_q(self, i):
        return f"model.layers.{i}.self_attn.q_proj.weight"

    def attn_k(self, i):
        return f"model.layers.{i}.self_attn.k_proj.weight"

    def attn_v(self, i):
        return f"model.layers.{i}.self_attn.v_proj.weight"

    def attn_o(self, i):
        return f"model.layers.{i}.self_attn.o_proj.weight"

    def attn_q_b(self, i):
        return f"model.layers.{i}.self_attn.q_proj.bias"

    def attn_k_b(self, i):
        return f"model.layers.{i}.self_attn.k_proj.bias"

    def attn_v_b(self, i):
        return f"model.layers.{i}.self_attn.v_proj.bias"

    def attn_q_norm(self, i):
        return f"model.layers.{i}.self_attn.q_norm.weight"

    def attn_k_norm(self, i):
        return f"model.layers.{i}.self_attn.k_norm.weight"

    def ffn_norm(self, i):
        return f"model.layers.{i}.post_attention_layernorm.weight"

    def gate(self, i):
        return f"model.layers.{i}.mlp.gate_proj.weight"

    def up(self, i):
        return f"model.layers.{i}.mlp.up_proj.weight"

    def down(self, i):
        return f"model.layers.{i}.mlp.down_proj.weight"

    @staticmethod
    def match(state_dict):
        return (
            "model.norm.weight" in state_dict
            and "model.layers.0.self_attn.q_proj.weight" in state_dict
        )


class JiugeMetaFromLlama(JiugeMetaCStruct):
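    """Jiuge meta parameters derived from a Hugging Face Llama-style config dict."""
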
    def __init__(self, config, dtype=torch.float16, max_tokens=None):
        if dtype == torch.float16:
            dt_ = DataType.INFINI_DTYPE_F16
        elif dtype == torch.float32:
            dt_ = DataType.INFINI_DTYPE_F32
        elif dtype == torch.bfloat16:
            dt_ = DataType.INFINI_DTYPE_BF16
        else:
            dt_ = DataType.INFINI_DTYPE_F16

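        # Residual/embedding scaling factors used by MiniCPM/FM9G variants;
        # they stay at 1.0 (no-ops) for other model types.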
        self.scale_input = 1.0
        self.scale_output = 1.0
        self.scale_o = 1.0
        self.scale_down = 1.0
        if (
            config["model_type"] in ["fm9g", "minicpm"]
            and "scale_emb" in config
            and "scale_depth" in config
            and "dim_model_base" in config
        ):
            self.scale_input = config["scale_emb"]
            self.scale_output = config["hidden_size"] // config["dim_model_base"]
            self.scale_o = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )
            self.scale_down = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )

        super().__init__(
            dt_logits=dt_,
            nlayer=config["num_hidden_layers"],
            d=config["hidden_size"],
            nh=config["num_attention_heads"],
            nkvh=(
                config["num_key_value_heads"]
                if "num_key_value_heads" in config
                else config["num_attention_heads"]
            ),
            dh=(
                config["head_dim"]
                if "head_dim" in config
                else config["hidden_size"] // config["num_attention_heads"]
            ),
            di=config["intermediate_size"],
            dctx=(
                config["max_position_embeddings"] if max_tokens is None else max_tokens
            ),
            dvoc=config["vocab_size"],
            epsilon=config["rms_norm_eps"],
            theta=(config["rope_theta"] if "rope_theta" in config else 100000.0),
            end_token=2,
        )
        self.torch_dtype_logits = dtype


class JiugeWeightsImpl(JiugeWeightsCStruct):
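    """Host-side weight tensors arranged for the C runtime.

    Converts a Hugging Face state dict into (optionally transposed and
    device-sharded) tensors and fills the C struct with pointer tables.
    Tensors are kept alive as attributes so their data_ptr() values stay
    valid for the lifetime of the model.
    """
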
    def __init__(
        self,
        meta,
        naming,
        state_dict,
        torch_dt_mat=torch.float16,
        torch_dt_norm=torch.float32,
        ndev=1,
        transpose_weight=True,
    ):
        nlayer = meta.nlayer
        nh = meta.nh
        nkvh = meta.nkvh
        dh = meta.dh
        d = meta.d
        di = meta.di
        scale_input = meta.scale_input
        scale_output = meta.scale_output
        scale_o = meta.scale_o
        scale_down = meta.scale_down
        assert nh % nkvh == 0
        assert nh % ndev == 0
        assert nkvh % ndev == 0
        assert di % ndev == 0
        torch_dt_logits = meta.torch_dtype_logits
        if torch_dt_mat == torch.float16:
            self.dt_mat = DataType.INFINI_DTYPE_F16
        elif torch_dt_mat == torch.float32:
            self.dt_mat = DataType.INFINI_DTYPE_F32
        elif torch_dt_mat == torch.bfloat16:
            self.dt_mat = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported proj weight data type")
        if torch_dt_norm == torch.float16:
            self.dt_norm = DataType.INFINI_DTYPE_F16
        elif torch_dt_norm == torch.float32:
            self.dt_norm = DataType.INFINI_DTYPE_F32
        elif torch_dt_norm == torch.bfloat16:
            self.dt_norm = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported norm weight data type")

        input_embd_naming = (
            naming.input_embd()
            if naming.input_embd() in state_dict
            else naming.output_embd()
        )
        output_embd_naming = (
            naming.output_embd()
            if naming.output_embd() in state_dict
            else naming.input_embd()
        )
        self.transpose_linear_weights = 1 if transpose_weight else 0
        self.nlayer = nlayer
        self.input_embd_tensor = (
            state_dict[input_embd_naming].to(torch_dt_logits) * scale_input
        )
        self.input_embd = self.input_embd_tensor.data_ptr()
        self.output_norm_tensor = (
            state_dict[naming.output_norm()].to(torch_dt_norm) * scale_output
        )
        self.output_norm = self.output_norm_tensor.data_ptr()
        self.output_embd_tensor = state_dict[output_embd_naming].to(torch_dt_mat)
        if not transpose_weight:
            self.output_embd_tensor = self.output_embd_tensor.transpose(
                0, 1
            ).contiguous()
        self.output_embd = self.output_embd_tensor.data_ptr()

        self.attn_norm_tensors = [
            state_dict[naming.attn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.attn_norm_ptrs = [
            self.attn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.attn_norm = (c_void_p * nlayer)(*self.attn_norm_ptrs)

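        # Fuse Q, K and V into one weight per layer. Each head's dims are
        # regrouped from (2, dh // 2) to (dh // 2, 2), which appears to be the
        # rotary-embedding layout the C runtime expects, and each device gets
        # a contiguous slice of the Q, K and V heads.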
        def qkv_slices(_i):
            _Q = (
                state_dict[naming.attn_q(_i)]
                .reshape([nh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _K = (
                state_dict[naming.attn_k(_i)]
                .reshape([nkvh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _V = state_dict[naming.attn_v(_i)].reshape([nkvh, dh // 2, 2, d])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_Q[_idev * _nh : (_idev + 1) * _nh, :, :, :])
                _result.append(_K[_idev * _nkvh : (_idev + 1) * _nkvh, :, :, :])
                _result.append(_V[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        self.qkv_tensor = [
            torch.concat(qkv_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.qkv_tensor[i] = (
                    self.qkv_tensor[i]
                    .reshape(ndev, (nh + 2 * nkvh) // ndev * dh, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.qkv_tensor_ptrs = [self.qkv_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_qkv = (c_void_p * nlayer)(*self.qkv_tensor_ptrs)

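        # Optional Q/K/V projection biases (present in e.g. Qwen2 checkpoints),
        # regrouped and sharded the same way as the fused QKV weights above.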
        def qkv_b_slices(_i):
            _QB = (
                state_dict[naming.attn_q_b(_i)]
                .reshape([nh, 2, dh // 2])
                .transpose(1, 2)
            )
            _KB = (
                state_dict[naming.attn_k_b(_i)]
                .reshape([nkvh, 2, dh // 2])
                .transpose(1, 2)
            )
            _VB = state_dict[naming.attn_v_b(_i)].reshape([nkvh, dh // 2, 2])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_QB[_idev * _nh : (_idev + 1) * _nh, :, :].flatten())
                _result.append(_KB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
                _result.append(_VB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
            return _result

        if naming.attn_q_b(0) in state_dict:
            self.qkv_b_tensors = [
                torch.concat(qkv_b_slices(i)).to(torch_dt_logits) for i in range(nlayer)
            ]
            self.qkv_b_tensor_ptrs = [
                self.qkv_b_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_qkv_b = (c_void_p * nlayer)(*self.qkv_b_tensor_ptrs)
        else:
            self.attn_qkv_b = None

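        # Optional per-head Q/K norm weights (present in e.g. Qwen3),
        # regrouped into the same (dh // 2, 2) rotary layout as above.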
        if naming.attn_q_norm(0) in state_dict:
            self.attn_q_norm_tensors = [
                state_dict[naming.attn_q_norm(i)]
                .reshape([2, dh // 2])
                .transpose(0, 1)
                .contiguous()
                .to(torch_dt_norm)
                for i in range(nlayer)
            ]
            self.attn_q_norm_ptrs = [
                self.attn_q_norm_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_q_norm = (c_void_p * nlayer)(*self.attn_q_norm_ptrs)
            self.attn_k_norm_tensors = [
                state_dict[naming.attn_k_norm(i)]
                .reshape([2, dh // 2])
                .transpose(0, 1)
                .contiguous()
                .to(torch_dt_norm)
                for i in range(nlayer)
            ]
            self.attn_k_norm_ptrs = [
                self.attn_k_norm_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_k_norm = (c_void_p * nlayer)(*self.attn_k_norm_ptrs)
        else:
            self.attn_q_norm = None
            self.attn_k_norm = None

        self.attn_o_tensor = [
            (
                state_dict[naming.attn_o(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, nh // ndev * dh])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.attn_o(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_o
            for i in range(nlayer)
        ]
        self.attn_o_ptrs = [self.attn_o_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_o = (c_void_p * nlayer)(*self.attn_o_ptrs)

        self.ffn_norm_tensors = [
            state_dict[naming.ffn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.ffn_norm_ptrs = [
            self.ffn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.ffn_norm = (c_void_p * nlayer)(*self.ffn_norm_ptrs)

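        # Fuse the gate and up projections so each device receives a
        # contiguous [gate; up] slice of the FFN weights.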
        def gate_up_slices(_i):
            _result = []
            _di = di // ndev
            for _idev in range(ndev):
                _start = _idev * _di
                _end = (_idev + 1) * _di
                _result.append(state_dict[naming.gate(_i)][_start:_end, :])
                _result.append(state_dict[naming.up(_i)][_start:_end, :])
            return _result

        self.gate_up_tensors = [
            torch.concat(gate_up_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.gate_up_tensors[i] = (
                    self.gate_up_tensors[i]
                    .reshape(ndev, 2 * di // ndev, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.gate_up_ptrs = [self.gate_up_tensors[i].data_ptr() for i in range(nlayer)]
        self.ffn_gate_up = (c_void_p * nlayer)(*self.gate_up_ptrs)

        self.ffn_down_tensor = [
            (
                state_dict[naming.down(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, di // ndev])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.down(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_down
            for i in range(nlayer)
        ]
        self.ffn_down_ptrs = [self.ffn_down_tensor[i].data_ptr() for i in range(nlayer)]
        self.ffn_down = (c_void_p * nlayer)(*self.ffn_down_ptrs)


class JiugeBatchedTask:
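    """Flattens a batch of InferTask objects into the ctypes arrays the C API expects."""
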
    def __init__(self, tasks: List[InferTask]):
        self.tasks = tasks
        self.nreq = len(tasks)

        # Precompute fields
        token_lists = [t.tokens for t in tasks]
        self.req_lens_list = [len(toks) for toks in token_lists]
        self.req_pos_list = [t.pos for t in tasks]
        self.kv_cache_ptrs = [t.kvcache().data() for t in tasks]
        self.temperatures_list = [t.temperature for t in tasks]
        self.topks_list = [t.topk for t in tasks]
        self.topps_list = [t.topp for t in tasks]

        # Flatten token lists
        flat_tokens = [tok for toks in token_lists for tok in toks]
        self.ntok = len(flat_tokens)

        # Convert to ctypes arrays in one pass
        self.tokens = (c_uint * self.ntok)(*flat_tokens)
        self.req_lens = (c_uint * self.nreq)(*self.req_lens_list)
        self.req_pos = (c_uint * self.nreq)(*self.req_pos_list)
        self.kv_caches = (POINTER(KVCacheCStruct) * self.nreq)(*self.kv_cache_ptrs)
        self.temperatures = (c_float * self.nreq)(*self.temperatures_list)
        self.topks = (c_uint * self.nreq)(*self.topks_list)
        self.topps = (c_float * self.nreq)(*self.topps_list)

    def input_args(self):
        return (
            self.tokens,
            self.ntok,
            self.req_lens,
            self.nreq,
            self.req_pos,
            self.kv_caches,
            self.temperatures,
            self.topks,
            self.topps,
        )


class JiugeForCauslLM:
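    """End-to-end wrapper: loads config, tokenizer and weights, builds the
    C-side model instance, and exposes generation and perplexity evaluation."""
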
    def __init__(
        self, model_dir_path, device=DeviceType.DEVICE_TYPE_CPU, ndev=1, max_tokens=None
    ):
        def load_all_safetensors_from_dir(dir_path_: str):
            tensors_ = {}
            dir_path_ = Path(dir_path_)
            for file in sorted(dir_path_.glob("*.safetensors")):
                data_ = safetensors.safe_open(file, "pt")
                for name_ in data_.keys():
                    tensors_[name_] = data_.get_tensor(name_)
            return tensors_

        print("Loading model weights to host...")
        load_start_time = time.time()

        with open(os.path.join(model_dir_path, "config.json"), "r") as f:
            config = json.load(f)
            self.config = config
        eos_token_id = self.config["eos_token_id"]
        self.eos_token_id = (
            [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        )
        transpose_weight = (
            device != DeviceType.DEVICE_TYPE_ASCEND
        )  # y = xW is faster than y=xW^T on Ascend

        self.jiuge_model = JiugeModel()

        if "llama" == config["model_type"]:
            model = (
                transformers.LlamaForCausalLM.from_pretrained(model_dir_path)
                .cpu()
                .half()
            )
            self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir_path)
            self.weights = JiugeWeightsImpl(
                self.meta,
                LlamaWeightsNaming(),
                model.state_dict(),
                ndev=ndev,
                transpose_weight=transpose_weight,
            )
        elif "fm9g" == config["model_type"] or "minicpm" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "fm9g7b" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "qwen2" == config["model_type"] or "qwen3" == config["model_type"]:
            state_dict = load_all_safetensors_from_dir(model_dir_path)
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path
                )
            else:
                raise ValueError("Unsupported weight naming")
        else:
            raise ValueError("Unsupported model architecture")

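        # Work around SentencePiece-style incremental decoding: when the fast
        # tokenizer pairs a Prepend normalizer with a Strip decoder, decoding
        # tokens one at a time drops leading spaces, so swap in a decoder that
        # maps the "▁" marker back to a plain space.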
        if "llama" == config["model_type"]:
            from tokenizers import decoders as _dec
            backend = getattr(self.tokenizer, "backend_tokenizer", None)
            target = getattr(backend, "_tokenizer", backend)
            norm = getattr(target, "normalizer", None)
            dec = getattr(target, "decoder", None)
            sn = repr(norm)[:800] if norm is not None else ""
            sd = repr(dec)[:800] if dec is not None else ""
            has_prepend = "Prepend" in sn
            has_strip = "Strip" in sd
            if has_prepend and has_strip:
                target.decoder = _dec.Sequence([
                    _dec.Replace("▁", " "),
                    _dec.ByteFallback(),
                    _dec.Fuse(),
                ])

        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

        print(f"Creating model on {ndev} devices...")
        load_start_time = time.time()
        self.dev_ids = (c_int * ndev)(*range(ndev))
        self.ndev = ndev
        self.device = device

        self.model_instance = self.jiuge_model.create_model(
            byref(self.meta),
            byref(self.weights),
            device,
            ndev,
            self.dev_ids,
        )
        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

    def max_context_len(self):
        return self.meta.dctx

    def create_kv_cache(self):
        return self.jiuge_model.create_kv_cache(
            self.meta.nlayer,
            self.meta.dctx,
            self.meta.nkvh,
            self.meta.dh,
            self.meta.dh,
            self.meta.dt_logits,
            self.device,
            self.dev_ids,
            self.ndev,
        )

    def drop_kv_cache(self, kv_cache):
        self.jiuge_model.drop_kv_cache(kv_cache)

    def batch_infer_one_round(self, tasks: List[InferTask]):
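        # One decode step for the whole batch; returns one sampled token id per request.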
        output = (c_uint * len(tasks))()
        batch_inputs = JiugeBatchedTask(tasks)
        self.jiuge_model.infer_batch(
            self.model_instance,
            *(batch_inputs.input_args()),
            output,
        )
        return list(output)

    def generate(self, input_content, max_steps, topp_=1.0, topk_=1, temperature_=1.0):
        input_content = self.tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": input_content}],
            add_generation_prompt=True,
            tokenize=False,
        )
        print(input_content, end="", flush=True)
        tokens = self.tokenizer.encode(input_content)
        infer_task = InferTask(
            0,
            tokens,
            self.max_context_len(),
            temperature_,
            topk_,
            topp_,
            self.eos_token_id,
        )
        infer_task.bind_kvcache(KVCache(self))

        steps = 0
        total_time = 0
        output_content = ""

        for step_i in range(max_steps):
            start_time = time.time()
            output_tokens = self.batch_infer_one_round([infer_task])
            end_time = time.time()
            steps += 1
            output_str = self.tokenizer.decode(output_tokens[0])

            output_content += output_str
            print(output_str, end="", flush=True)
            if output_tokens[0] in self.eos_token_id:
                break
            infer_task.next(output_tokens[0])

            if step_i > 0:
                total_time += end_time - start_time
        print("\n")
        avg_time = total_time * 1000 / max(steps - 1, 1)
        print(f"Time per step: {avg_time:.3f}ms")

        infer_task._kv_cache.drop(self)
        return output_content, avg_time

    def perplexity(self, test_sequences: List[Sequence[int]], batch_size=10):
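        # Teacher-forced evaluation: feed each sequence's tokens[:-1], score
        # tokens[1:] against the returned logits, and report exp(mean NLL).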
        tasks = [
            InferTask(i, [], self.max_context_len(), 1.0, 1, 1.0, self.eos_token_id)
            for i in range(batch_size)
        ]
        kv_caches = [KVCache(self) for _ in range(batch_size)]

        nll = 0.0
        total_len = 0

        for i in range(0, len(test_sequences), batch_size):
            batch_id = 0
            true_tokens = []
            while batch_id < batch_size and batch_id + i < len(test_sequences):
                input_tokens = test_sequences[i + batch_id][:-1]
                true_tokens.extend(test_sequences[i + batch_id][1:])
                tasks[batch_id].tokens = input_tokens
                tasks[batch_id].bind_kvcache(kv_caches[batch_id])
                batch_id += 1

            batch_inputs = JiugeBatchedTask(tasks[:batch_id])
            logits = torch.zeros(
                (batch_inputs.ntok, self.meta.dvoc), dtype=self.meta.torch_dtype_logits
            )
            self.jiuge_model.forward_batch(
                self.model_instance,
                batch_inputs.tokens,
                batch_inputs.ntok,
                batch_inputs.req_lens,
                batch_inputs.nreq,
                batch_inputs.req_pos,
                batch_inputs.kv_caches,
                logits.data_ptr(),
            )

            logits = logits.float()
            token_ids = torch.tensor(true_tokens, dtype=torch.int64)  # [ntok,]
            log_probs = torch.nn.functional.log_softmax(logits, dim=-1)  # (ntok, vocab)
            token_logprobs = log_probs[
                torch.arange(batch_inputs.ntok), token_ids
            ]  # (ntok,)

            start = 0
            for req_len in batch_inputs.req_lens_list:
                nll += -token_logprobs[start : start + req_len].sum().item()
                start += req_len
            total_len += token_logprobs.numel()

        for task in tasks:
            task.release_kvcache()

        return math.exp(nll / total_len)

    def destroy_model_instance(self):
        self.jiuge_model.destroy_model(self.model_instance)
        print("Model destroyed")


def test():
    if len(sys.argv) < 3:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)
    model_path = sys.argv[2]
    device_type = DeviceType.DEVICE_TYPE_CPU
    if sys.argv[1] == "--cpu":
        device_type = DeviceType.DEVICE_TYPE_CPU
    elif sys.argv[1] == "--nvidia":
        device_type = DeviceType.DEVICE_TYPE_NVIDIA
    elif sys.argv[1] == "--cambricon":
        device_type = DeviceType.DEVICE_TYPE_CAMBRICON
    elif sys.argv[1] == "--ascend":
        device_type = DeviceType.DEVICE_TYPE_ASCEND
    elif sys.argv[1] == "--metax":
        device_type = DeviceType.DEVICE_TYPE_METAX
    elif sys.argv[1] == "--moore":
        device_type = DeviceType.DEVICE_TYPE_MOORE
    elif sys.argv[1] == "--iluvatar":
        device_type = DeviceType.DEVICE_TYPE_ILUVATAR
    elif sys.argv[1] == "--kunlun":
        device_type = DeviceType.DEVICE_TYPE_KUNLUN
    elif sys.argv[1] == "--hygon":
        device_type = DeviceType.DEVICE_TYPE_HYGON
    else:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)

    ndev = int(sys.argv[3]) if len(sys.argv) > 3 else 1
    model = JiugeForCauslLM(model_path, device_type, ndev)
    model.generate("山东最高的山是?", 500)
    model.destroy_model_instance()


if __name__ == "__main__":
    test()