from typing import List, Sequence

from libinfinicore_infer import (
    JiugeMetaCStruct,
    JiugeWeightsCStruct,
    KVCacheCStruct,
    DataType,
    DeviceType,
    create_jiuge_model,
    destroy_jiuge_model,
    create_kv_cache,
    drop_kv_cache,
    infer_batch,
    forward_batch,
)
from infer_task import InferTask, KVCache

from ctypes import POINTER, c_float, c_int, c_uint, c_void_p, byref
import os
from pathlib import Path
import safetensors
import sys
import time
import json
import math
import torch
import transformers

# Stage all weights in host memory; device placement is handled by the C runtime.
torch.set_default_device("cpu")


class LlamaWeightsNaming:
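    """Maps the weight slots used by the Jiuge runtime to Hugging Face
    Llama-style state_dict key names."""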
    def input_embd(self):
        return "model.embed_tokens.weight"

    def output_norm(self):
        return "model.norm.weight"

    def output_embd(self):
        return "lm_head.weight"

    def attn_norm(self, i):
        return f"model.layers.{i}.input_layernorm.weight"

    def attn_q(self, i):
        return f"model.layers.{i}.self_attn.q_proj.weight"

    def attn_k(self, i):
        return f"model.layers.{i}.self_attn.k_proj.weight"

    def attn_v(self, i):
        return f"model.layers.{i}.self_attn.v_proj.weight"

    def attn_o(self, i):
        return f"model.layers.{i}.self_attn.o_proj.weight"

    def attn_q_b(self, i):
        return f"model.layers.{i}.self_attn.q_proj.bias"

    def attn_k_b(self, i):
        return f"model.layers.{i}.self_attn.k_proj.bias"

    def attn_v_b(self, i):
        return f"model.layers.{i}.self_attn.v_proj.bias"

    def ffn_norm(self, i):
        return f"model.layers.{i}.post_attention_layernorm.weight"

    def gate(self, i):
        return f"model.layers.{i}.mlp.gate_proj.weight"

    def up(self, i):
        return f"model.layers.{i}.mlp.up_proj.weight"

    def down(self, i):
        return f"model.layers.{i}.mlp.down_proj.weight"

    @staticmethod
    def match(state_dict):
        return (
            "model.norm.weight" in state_dict
            and "model.layers.0.self_attn.q_proj.weight" in state_dict
        )

class JiugeMetaFromLlama(JiugeMetaCStruct):
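    """Builds the JiugeMetaCStruct metadata for the C runtime from a Hugging
    Face config.json dict."""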
    def __init__(self, config, dtype=torch.float16, max_tokens=None):
        if dtype == torch.float16:
            dt_ = DataType.INFINI_DTYPE_F16
        elif dtype == torch.float32:
            dt_ = DataType.INFINI_DTYPE_F32
        elif dtype == torch.bfloat16:
            dt_ = DataType.INFINI_DTYPE_BF16
        else:
            # Fall back to FP16 for any other logits dtype.
            dt_ = DataType.INFINI_DTYPE_F16

        self.scale_input = 1.0
        self.scale_output = 1.0
        self.scale_o = 1.0
        self.scale_down = 1.0
        if (
            config["model_type"] in ["fm9g", "minicpm"]
            and "scale_emb" in config
            and "scale_depth" in config
            and "dim_model_base" in config
        ):
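            # FM9G/MiniCPM checkpoints publish extra scaling constants; fold
            # them into the weights: scale_emb for the input embedding,
            # hidden_size / dim_model_base for the logits, and
            # scale_depth / sqrt(nlayer) for both residual-branch projections.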
            self.scale_input = config["scale_emb"]
            self.scale_output = config["hidden_size"] // config["dim_model_base"]
            self.scale_o = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )
            self.scale_down = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )

        super().__init__(
            dt_logits=dt_,
            nlayer=config["num_hidden_layers"],
            d=config["hidden_size"],
            nh=config["num_attention_heads"],
            nkvh=(
                config["num_key_value_heads"]
                if "num_key_value_heads" in config
                else config["num_attention_heads"]
            ),
            dh=config["hidden_size"] // config["num_attention_heads"],
            di=config["intermediate_size"],
            dctx=(
                config["max_position_embeddings"] if max_tokens is None else max_tokens
            ),
            dvoc=config["vocab_size"],
            epsilon=config["rms_norm_eps"],
            theta=(config["rope_theta"] if "rope_theta" in config else 100000.0),
            end_token=2,
        )
        self.torch_dtype_logits = dtype


class JiugeWeightsImpl(JiugeWeightsCStruct):
    def __init__(
        self,
        meta,
        naming,
        state_dict,
        torch_dt_mat=torch.float16,
        torch_dt_norm=torch.float32,
        ndev=1,
        transpose_weight=True,
    ):
        nlayer = meta.nlayer
        nh = meta.nh
        nkvh = meta.nkvh
        dh = meta.dh
        d = meta.d
        di = meta.di
        scale_input = meta.scale_input
        scale_output = meta.scale_output
        scale_o = meta.scale_o
        scale_down = meta.scale_down
        assert nh % nkvh == 0
        assert nh % ndev == 0
        assert nkvh % ndev == 0
        assert di % ndev == 0
        torch_dt_logits = meta.torch_dtype_logits
        if torch_dt_mat == torch.float16:
            self.dt_mat = DataType.INFINI_DTYPE_F16
        elif torch_dt_mat == torch.float32:
            self.dt_mat = DataType.INFINI_DTYPE_F32
        elif torch_dt_mat == torch.bfloat16:
            self.dt_mat = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported proj weight data type")
        if torch_dt_norm == torch.float16:
            self.dt_norm = DataType.INFINI_DTYPE_F16
        elif torch_dt_norm == torch.float32:
            self.dt_norm = DataType.INFINI_DTYPE_F32
        elif torch_dt_norm == torch.bfloat16:
            self.dt_norm = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported norm weight data type")

        input_embd_naming = (
            naming.input_embd()
            if naming.input_embd() in state_dict
            else naming.output_embd()
        )
        output_embd_naming = (
            naming.output_embd()
            if naming.output_embd() in state_dict
            else naming.input_embd()
        )
        self.transpose_linear_weights = 1 if transpose_weight else 0
        self.nlayer = nlayer
        self.input_embd_tensor = (
            state_dict[input_embd_naming].to(torch_dt_logits) * scale_input
        )
        self.input_embd = self.input_embd_tensor.data_ptr()
        self.output_norm_tensor = (
            state_dict[naming.output_norm()].to(torch_dt_norm) * scale_output
        )
        self.output_norm = self.output_norm_tensor.data_ptr()
        self.output_embd_tensor = state_dict[output_embd_naming].to(torch_dt_mat)
        if not transpose_weight:
            self.output_embd_tensor = self.output_embd_tensor.transpose(
                0, 1
            ).contiguous()
        self.output_embd = self.output_embd_tensor.data_ptr()

        self.attn_norm_tensors = [
            state_dict[naming.attn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.attn_norm_ptrs = [
            self.attn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.attn_norm = (c_void_p * nlayer)(*self.attn_norm_ptrs)

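        # Fuse Q/K/V into one per-layer weight, sharded across ndev devices.
        # Q and K rows are permuted from HF's half-split RoPE layout into
        # interleaved (real, imag) pairs, which appears to be the layout the
        # C backend expects; V is only reshaped so the slices concatenate.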
        def qkv_slices(_i):
            _Q = (
                state_dict[naming.attn_q(_i)]
                .reshape([nh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _K = (
                state_dict[naming.attn_k(_i)]
                .reshape([nkvh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _V = state_dict[naming.attn_v(_i)].reshape([nkvh, dh // 2, 2, d])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_Q[_idev * _nh : (_idev + 1) * _nh, :, :, :])
                _result.append(_K[_idev * _nkvh : (_idev + 1) * _nkvh, :, :, :])
                _result.append(_V[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        self.qkv_tensor = [
            torch.concat(qkv_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.qkv_tensor[i] = (
                    self.qkv_tensor[i]
                    .reshape(ndev, (nh + 2 * nkvh) // ndev * dh, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.qkv_tensor_ptrs = [self.qkv_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_qkv = (c_void_p * nlayer)(*self.qkv_tensor_ptrs)

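        # Same per-device slicing and RoPE permutation as qkv_slices, applied
        # to the QKV projection biases (present in Qwen2-style checkpoints).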
        def qkv_b_slices(_i):
            _QB = (
                state_dict[naming.attn_q_b(_i)]
                .reshape([nh, 2, dh // 2])
                .transpose(1, 2)
            )
            _KB = (
                state_dict[naming.attn_k_b(_i)]
                .reshape([nkvh, 2, dh // 2])
                .transpose(1, 2)
            )
            _VB = state_dict[naming.attn_v_b(_i)].reshape([nkvh, dh // 2, 2])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_QB[_idev * _nh : (_idev + 1) * _nh, :, :].flatten())
                _result.append(_KB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
                _result.append(_VB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
            return _result

        if naming.attn_q_b(0) in state_dict:
            self.qkv_b_tensors = [
                torch.concat(qkv_b_slices(i)).to(torch_dt_logits) for i in range(nlayer)
            ]
            self.qkv_b_tensor_ptrs = [
                self.qkv_b_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_qkv_b = (c_void_p * nlayer)(*self.qkv_b_tensor_ptrs)
        else:
            self.attn_qkv_b = None

        self.attn_o_tensor = [
            (
                state_dict[naming.attn_o(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, nh // ndev * dh])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.attn_o(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_o
            for i in range(nlayer)
        ]
        self.attn_o_ptrs = [self.attn_o_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_o = (c_void_p * nlayer)(*self.attn_o_ptrs)

        self.ffn_norm_tensors = [
            state_dict[naming.ffn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.ffn_norm_ptrs = [
            self.ffn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.ffn_norm = (c_void_p * nlayer)(*self.ffn_norm_ptrs)

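        # Slice gate_proj and up_proj per device and interleave them so each
        # device reads one contiguous [2 * di / ndev, d] block.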
        def gate_up_slices(_i):
            _result = []
            _di = di // ndev
            for _idev in range(ndev):
                _start = _idev * _di
                _end = (_idev + 1) * _di
                _result.append(state_dict[naming.gate(_i)][_start:_end, :])
                _result.append(state_dict[naming.up(_i)][_start:_end, :])
            return _result

        self.gate_up_tensors = [
            torch.concat(gate_up_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.gate_up_tensors[i] = (
                    self.gate_up_tensors[i]
                    .reshape(ndev, 2 * di // ndev, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.gate_up_ptrs = [self.gate_up_tensors[i].data_ptr() for i in range(nlayer)]
        self.ffn_gate_up = (c_void_p * nlayer)(*self.gate_up_ptrs)

        self.ffn_down_tensor = [
            (
                state_dict[naming.down(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, di // ndev])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.down(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_down
            for i in range(nlayer)
        ]
        self.ffn_down_ptrs = [self.ffn_down_tensor[i].data_ptr() for i in range(nlayer)]
        self.ffn_down = (c_void_p * nlayer)(*self.ffn_down_ptrs)


class JiugeBatchedTask:
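    """Flattens a batch of InferTask objects into the parallel ctypes arrays
    consumed by infer_batch / forward_batch."""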
    def __init__(self, tasks: List[InferTask]):
        self.tasks = tasks
        self.nreq = len(tasks)

        # Precompute fields
        token_lists = [t.tokens for t in tasks]
        self.req_lens_list = [len(toks) for toks in token_lists]
        self.req_pos_list = [t.pos for t in tasks]
        self.kv_cache_ptrs = [t.kvcache().data() for t in tasks]
        self.temperatures_list = [t.temperature for t in tasks]
        self.topks_list = [t.topk for t in tasks]
        self.topps_list = [t.topp for t in tasks]

        # Flatten token lists
        flat_tokens = [tok for toks in token_lists for tok in toks]
        self.ntok = len(flat_tokens)

        # Convert to ctypes arrays in one pass
        self.tokens = (c_uint * self.ntok)(*flat_tokens)
        self.req_lens = (c_uint * self.nreq)(*self.req_lens_list)
        self.req_pos = (c_uint * self.nreq)(*self.req_pos_list)
        self.kv_caches = (POINTER(KVCacheCStruct) * self.nreq)(*self.kv_cache_ptrs)
        self.temperatures = (c_float * self.nreq)(*self.temperatures_list)
        self.topks = (c_uint * self.nreq)(*self.topks_list)
        self.topps = (c_float * self.nreq)(*self.topps_list)

    def input_args(self):
        return (
            self.tokens,
            self.ntok,
            self.req_lens,
            self.nreq,
            self.req_pos,
            self.kv_caches,
            self.temperatures,
            self.topks,
            self.topps,
        )


class JiugeForCauslLM:
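    """End-to-end wrapper: loads a Hugging Face style checkpoint, builds the
    meta/weight structs, and drives the libinfinicore_infer runtime.

    Minimal usage sketch (the path is illustrative):

        model = JiugeForCauslLM("/path/to/model_dir", DeviceType.DEVICE_TYPE_CPU)
        output, avg_ms = model.generate("Hello", max_steps=100)
        model.destroy_model_instance()
    """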
    def __init__(
        self, model_dir_path, device=DeviceType.DEVICE_TYPE_CPU, ndev=1, max_tokens=None
    ):
        def load_all_safetensors_from_dir(dir_path_: str):
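            """Merge every *.safetensors shard under dir_path_ into one state dict."""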
            tensors_ = {}
            dir_path_ = Path(dir_path_)
            for file in sorted(dir_path_.glob("*.safetensors")):
                data_ = safetensors.safe_open(file, "pt")
                for name_ in data_.keys():
                    tensors_[name_] = data_.get_tensor(name_)
            return tensors_

        print("Loading model weights to host...")
        load_start_time = time.time()

        with open(os.path.join(model_dir_path, "config.json"), "r") as f:
            config = json.load(f)
            self.config = config
        eos_token_id = self.config["eos_token_id"]
        self.eos_token_id = (
            [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        )
        transpose_weight = (
            device != DeviceType.DEVICE_TYPE_ASCEND
        )  # y = xW is faster than y=xW^T on Ascend
        if "llama" == config["model_type"]:
            model = (
                transformers.LlamaForCausalLM.from_pretrained(model_dir_path)
                .cpu()
                .half()
            )
            self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir_path)
            self.weights = JiugeWeightsImpl(
                self.meta,
                LlamaWeightsNaming(),
                model.state_dict(),
                ndev=ndev,
                transpose_weight=transpose_weight,
            )
        elif "fm9g" == config["model_type"] or "minicpm" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "fm9g7b" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "qwen2" == config["model_type"]:
            state_dict = load_all_safetensors_from_dir(model_dir_path)
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path
                )
        else:
            raise ValueError("Unsupported model architecture")

        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

        print(f"Creating model on {ndev} devices...")
        load_start_time = time.time()
        dev_ids = (c_int * ndev)(*[i for i in range(ndev)])
        self.model_instance = create_jiuge_model(
            byref(self.meta),
            byref(self.weights),
            device,
            ndev,
            dev_ids,
        )
        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

    def max_context_len(self):
        return self.meta.dctx

    def create_kv_cache(self):
        return create_kv_cache(self.model_instance)

    def drop_kv_cache(self, kv_cache):
        drop_kv_cache(self.model_instance, kv_cache)

    def batch_infer_one_round(self, tasks: List[InferTask]):
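        """Run one decoding step for every task; returns one sampled token per request."""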
        output = (c_uint * len(tasks))()
        batch_inputs = JiugeBatchedTask(tasks)
        infer_batch(
            self.model_instance,
            *(batch_inputs.input_args()),
            output,
        )
        return list(output)

    def generate(self, input_content, max_steps, topp_=1.0, topk_=1, temperature_=1.0):
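        """Single-prompt chat generation: applies the chat template, then
        decodes up to max_steps tokens, streaming them to stdout."""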
        input_content = self.tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": input_content}],
            add_generation_prompt=True,
            tokenize=False,
        )
        print(input_content, end="", flush=True)
        tokens = self.tokenizer.encode(input_content)
        infer_task = InferTask(
            0,
            tokens,
            self.max_context_len(),
            temperature_,
            topk_,
            topp_,
            self.eos_token_id,
        )
        infer_task.bind_kvcache(KVCache(self))

        steps = 0
        total_time = 0
        output_content = ""

        for step_i in range(max_steps):
            start_time = time.time()
            output_tokens = self.batch_infer_one_round([infer_task])
            end_time = time.time()
            steps += 1
            # Decode the sampled token id directly, restoring SentencePiece
            # word boundaries ("▁") and newline bytes ("<0x0A>").
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )
            output_content += output_str
            print(output_str, end="", flush=True)
            if output_tokens[0] in self.eos_token_id:
                break
            infer_task.next(output_tokens[0])

            if step_i > 0:
                total_time += end_time - start_time

        print("\n")
        # Exclude the first step (prefill) from the average; guard against steps == 1.
        avg_time = total_time * 1000 / max(steps - 1, 1)
        print(f"Time per step: {avg_time:.3f}ms")

        infer_task._kv_cache.drop(self)
        return output_content, avg_time

    def perplexity(self, test_sequences: List[Sequence[int]], batch_size=10):
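        """Teacher-forced corpus perplexity: for each sequence, feed
        tokens[:-1], score tokens[1:] against the returned logits, and
        return exp(total NLL / token count)."""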
        tasks = [
            InferTask(i, [], self.max_context_len(), 1.0, 1, 1.0, self.eos_token_id)
            for i in range(batch_size)
        ]
        kv_caches = [KVCache(self) for _ in range(batch_size)]

        nll = 0.0
        total_len = 0

        for i in range(0, len(test_sequences), batch_size):
            batch_id = 0
            true_tokens = []
            while batch_id < batch_size and batch_id + i < len(test_sequences):
                input_tokens = test_sequences[i + batch_id][:-1]
                true_tokens.extend(test_sequences[i + batch_id][1:])
                tasks[batch_id].tokens = input_tokens
                tasks[batch_id].bind_kvcache(kv_caches[batch_id])
                batch_id += 1

            batch_inputs = JiugeBatchedTask(tasks[:batch_id])
            logits = torch.zeros(
                (batch_inputs.ntok, self.meta.dvoc), dtype=self.meta.torch_dtype_logits
            )
            forward_batch(
                self.model_instance,
                batch_inputs.tokens,
                batch_inputs.ntok,
                batch_inputs.req_lens,
                batch_inputs.nreq,
                batch_inputs.req_pos,
                batch_inputs.kv_caches,
                logits.data_ptr(),
            )

            logits = logits.float()
            token_ids = torch.tensor(true_tokens, dtype=torch.int64)  # [ntok,]
            log_probs = torch.nn.functional.log_softmax(logits, dim=-1)  # (ntok, vocab)
            token_logprobs = log_probs[
                torch.arange(batch_inputs.ntok), token_ids
            ]  # (ntok,)

            start = 0
            for req_len in batch_inputs.req_lens_list:
                nll += -token_logprobs[start : start + req_len].sum().item()
                start += req_len
            total_len += token_logprobs.numel()

        for task in tasks:
            task.release_kvcache()

        return math.exp(nll / total_len)

    def destroy_model_instance(self):
        destroy_jiuge_model(self.model_instance)
        print("Model destroyed")


def test():
    if len(sys.argv) < 3:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)
    model_path = sys.argv[2]
    device_type = DeviceType.DEVICE_TYPE_CPU
    if sys.argv[1] == "--cpu":
        device_type = DeviceType.DEVICE_TYPE_CPU
    elif sys.argv[1] == "--nvidia":
        device_type = DeviceType.DEVICE_TYPE_NVIDIA
    elif sys.argv[1] == "--cambricon":
        device_type = DeviceType.DEVICE_TYPE_CAMBRICON
    elif sys.argv[1] == "--ascend":
        device_type = DeviceType.DEVICE_TYPE_ASCEND
    elif sys.argv[1] == "--metax":
        device_type = DeviceType.DEVICE_TYPE_METAX
    elif sys.argv[1] == "--moore":
        device_type = DeviceType.DEVICE_TYPE_MOORE
    elif sys.argv[1] == "--iluvatar":
        device_type = DeviceType.DEVICE_TYPE_ILUVATAR
    else:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)

    ndev = int(sys.argv[3]) if len(sys.argv) > 3 else 1
    model = JiugeForCauslLM(model_path, device_type, ndev)
    model.generate("山东最高的山是?", 500)  # "What is the highest mountain in Shandong?"
    model.destroy_model_instance()


if __name__ == "__main__":
    test()