from typing import List, Sequence
import math
import os
from pathlib import Path
import safetensors
import sys
import time
import json
import torch
import transformers

from libinfinicore_infer import (
    JiugeModel,
    JiugeMetaCStruct,
    JiugeWeightsCStruct,
    DataType,
    DeviceType,
    KVCacheCStruct,
)
from infer_task import InferTask, KVCache

from ctypes import POINTER, c_float, c_int, c_uint, c_void_p, byref

torch.set_default_device("cpu")


class LlamaWeightsNaming:
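    """Weight-key naming scheme for Hugging Face Llama-style checkpoints."""
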
    def input_embd(self):
        return "model.embed_tokens.weight"

    def output_norm(self):
        return "model.norm.weight"

    def output_embd(self):
        return "lm_head.weight"

    def attn_norm(self, i):
        return f"model.layers.{i}.input_layernorm.weight"

    def attn_q(self, i):
        return f"model.layers.{i}.self_attn.q_proj.weight"

    def attn_k(self, i):
        return f"model.layers.{i}.self_attn.k_proj.weight"

    def attn_v(self, i):
        return f"model.layers.{i}.self_attn.v_proj.weight"

    def attn_o(self, i):
        return f"model.layers.{i}.self_attn.o_proj.weight"

    def attn_q_b(self, i):
        return f"model.layers.{i}.self_attn.q_proj.bias"

    def attn_k_b(self, i):
        return f"model.layers.{i}.self_attn.k_proj.bias"

    def attn_v_b(self, i):
        return f"model.layers.{i}.self_attn.v_proj.bias"

    def ffn_norm(self, i):
        return f"model.layers.{i}.post_attention_layernorm.weight"

    def gate(self, i):
        return f"model.layers.{i}.mlp.gate_proj.weight"

    def up(self, i):
        return f"model.layers.{i}.mlp.up_proj.weight"

    def down(self, i):
        return f"model.layers.{i}.mlp.down_proj.weight"

    @staticmethod
    def match(state_dict):
        return (
            "model.norm.weight" in state_dict
            and "model.layers.0.self_attn.q_proj.weight" in state_dict
        )


class JiugeMetaFromLlama(JiugeMetaCStruct):
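    """Builds the C-side metadata struct from a Hugging Face config dict.

    For fm9g/minicpm configs, the scale_emb / scale_depth / dim_model_base
    fields drive the input/output/attention/FFN residual scales; otherwise
    all scales stay at 1.0.
    """
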
    def __init__(self, config, dtype=torch.float16, max_tokens=None):
        if dtype == torch.float16:
            dt_ = DataType.INFINI_DTYPE_F16
        elif dtype == torch.float32:
            dt_ = DataType.INFINI_DTYPE_F32
        elif dtype == torch.bfloat16:
            dt_ = DataType.INFINI_DTYPE_BF16
        else:
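            # fall back to FP16 when the logits dtype is not recognized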
            dt_ = DataType.INFINI_DTYPE_F16

        self.scale_input = 1.0
        self.scale_output = 1.0
        self.scale_o = 1.0
        self.scale_down = 1.0
        if (
            config["model_type"] in ["fm9g", "minicpm"]
            and "scale_emb" in config
            and "scale_depth" in config
            and "dim_model_base" in config
        ):
            self.scale_input = config["scale_emb"]
            self.scale_output = config["hidden_size"] // config["dim_model_base"]
            self.scale_o = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )
            self.scale_down = config["scale_depth"] / math.sqrt(
                config["num_hidden_layers"]
            )

        super().__init__(
            dt_logits=dt_,
            nlayer=config["num_hidden_layers"],
            d=config["hidden_size"],
            nh=config["num_attention_heads"],
            nkvh=(
                config["num_key_value_heads"]
                if "num_key_value_heads" in config
                else config["num_attention_heads"]
            ),
            dh=config["hidden_size"] // config["num_attention_heads"],
            di=config["intermediate_size"],
            dctx=(
                config["max_position_embeddings"] if max_tokens is None else max_tokens
            ),
            dvoc=config["vocab_size"],
            epsilon=config["rms_norm_eps"],
            theta=(config["rope_theta"] if "rope_theta" in config else 100000.0),
            end_token=2,
        )
        self.torch_dtype_logits = dtype


class JiugeWeightsImpl(JiugeWeightsCStruct):
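    """Packs checkpoint tensors into the layout expected by the C runtime.

    The torch tensors are kept alive as attributes; only their data_ptr()
    values are handed to the C side, so this object must outlive the model.
    """
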
    def __init__(
        self,
        meta,
        naming,
        state_dict,
        torch_dt_mat=torch.float16,
        torch_dt_norm=torch.float32,
        ndev=1,
        transpose_weight=True,
    ):
        nlayer = meta.nlayer
        nh = meta.nh
        nkvh = meta.nkvh
        dh = meta.dh
        d = meta.d
        di = meta.di
        scale_input = meta.scale_input
        scale_output = meta.scale_output
        scale_o = meta.scale_o
        scale_down = meta.scale_down
        assert nh % nkvh == 0
        assert nh % ndev == 0
        assert nkvh % ndev == 0
        assert di % ndev == 0
        torch_dt_logits = meta.torch_dtype_logits
        if torch_dt_mat == torch.float16:
            self.dt_mat = DataType.INFINI_DTYPE_F16
        elif torch_dt_mat == torch.float32:
            self.dt_mat = DataType.INFINI_DTYPE_F32
        elif torch_dt_mat == torch.bfloat16:
            self.dt_mat = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported proj weight data type")
        if torch_dt_norm == torch.float16:
            self.dt_norm = DataType.INFINI_DTYPE_F16
        elif torch_dt_norm == torch.float32:
            self.dt_norm = DataType.INFINI_DTYPE_F32
        elif torch_dt_norm == torch.bfloat16:
            self.dt_norm = DataType.INFINI_DTYPE_BF16
        else:
            raise ValueError("Unsupported norm weight data type")

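        # Checkpoints with tied embeddings may store only one of the two
        # embedding matrices; fall back to whichever key exists.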
        input_embd_naming = (
            naming.input_embd()
            if naming.input_embd() in state_dict
            else naming.output_embd()
        )
        output_embd_naming = (
            naming.output_embd()
            if naming.output_embd() in state_dict
            else naming.input_embd()
        )
        self.transpose_linear_weights = 1 if transpose_weight else 0
        self.nlayer = nlayer
        self.input_embd_tensor = (
            state_dict[input_embd_naming].to(torch_dt_logits) * scale_input
        )
        self.input_embd = self.input_embd_tensor.data_ptr()
        self.output_norm_tensor = (
            state_dict[naming.output_norm()].to(torch_dt_norm) * scale_output
        )
        self.output_norm = self.output_norm_tensor.data_ptr()
        self.output_embd_tensor = state_dict[output_embd_naming].to(torch_dt_mat)
        if not transpose_weight:
            self.output_embd_tensor = self.output_embd_tensor.transpose(
                0, 1
            ).contiguous()
        self.output_embd = self.output_embd_tensor.data_ptr()

        self.attn_norm_tensors = [
            state_dict[naming.attn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.attn_norm_ptrs = [
            self.attn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.attn_norm = (c_void_p * nlayer)(*self.attn_norm_ptrs)

        def qkv_slices(_i):
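            # HF stores rotary Q/K with the two halves of each head split apart;
            # regroup them into interleaved pairs (the layout the C kernel expects),
            # then shard heads per device so each device's Q, K and V slices
            # concatenate into one QKV block.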
            _Q = (
                state_dict[naming.attn_q(_i)]
                .reshape([nh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _K = (
                state_dict[naming.attn_k(_i)]
                .reshape([nkvh, 2, dh // 2, d])
                .transpose(1, 2)
            )
            _V = state_dict[naming.attn_v(_i)].reshape([nkvh, dh // 2, 2, d])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_Q[_idev * _nh : (_idev + 1) * _nh, :, :, :])
                _result.append(_K[_idev * _nkvh : (_idev + 1) * _nkvh, :, :, :])
                _result.append(_V[_idev * _nkvh : (_idev + 1) * _nkvh, :, :])
            return _result

        self.qkv_tensor = [
            torch.concat(qkv_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.qkv_tensor[i] = (
                    self.qkv_tensor[i]
                    .reshape(ndev, (nh + 2 * nkvh) // ndev * dh, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.qkv_tensor_ptrs = [self.qkv_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_qkv = (c_void_p * nlayer)(*self.qkv_tensor_ptrs)

        def qkv_b_slices(_i):
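            # same regrouping and per-device sharding as qkv_slices, applied to
            # the optional Q/K/V projection biases (e.g. Qwen2-style checkpoints)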
            _QB = (
                state_dict[naming.attn_q_b(_i)]
                .reshape([nh, 2, dh // 2])
                .transpose(1, 2)
            )
            _KB = (
                state_dict[naming.attn_k_b(_i)]
                .reshape([nkvh, 2, dh // 2])
                .transpose(1, 2)
            )
            _VB = state_dict[naming.attn_v_b(_i)].reshape([nkvh, dh // 2, 2])
            _result = []
            _nh = nh // ndev
            _nkvh = nkvh // ndev
            for _idev in range(ndev):
                _result.append(_QB[_idev * _nh : (_idev + 1) * _nh, :, :].flatten())
                _result.append(_KB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
                _result.append(_VB[_idev * _nkvh : (_idev + 1) * _nkvh, :, :].flatten())
            return _result

        if naming.attn_q_b(0) in state_dict:
            self.qkv_b_tensors = [
                torch.concat(qkv_b_slices(i)).to(torch_dt_logits) for i in range(nlayer)
            ]
            self.qkv_b_tensor_ptrs = [
                self.qkv_b_tensors[i].data_ptr() for i in range(nlayer)
            ]
            self.attn_qkv_b = (c_void_p * nlayer)(*self.qkv_b_tensor_ptrs)
        else:
            self.attn_qkv_b = None

        self.attn_o_tensor = [
            (
                state_dict[naming.attn_o(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, nh // ndev * dh])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.attn_o(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_o
            for i in range(nlayer)
        ]
        self.attn_o_ptrs = [self.attn_o_tensor[i].data_ptr() for i in range(nlayer)]
        self.attn_o = (c_void_p * nlayer)(*self.attn_o_ptrs)

        self.ffn_norm_tensors = [
            state_dict[naming.ffn_norm(i)].to(torch_dt_norm) for i in range(nlayer)
        ]
        self.ffn_norm_ptrs = [
            self.ffn_norm_tensors[i].data_ptr() for i in range(nlayer)
        ]
        self.ffn_norm = (c_void_p * nlayer)(*self.ffn_norm_ptrs)

        def gate_up_slices(_i):
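            # interleave per-device gate and up slices so each device gets one
            # contiguous [2 * di/ndev, d] block for the fused gate/up projection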
            _result = []
            _di = di // ndev
            for _idev in range(ndev):
                _start = _idev * _di
                _end = (_idev + 1) * _di
                _result.append(state_dict[naming.gate(_i)][_start:_end, :])
                _result.append(state_dict[naming.up(_i)][_start:_end, :])
            return _result

        self.gate_up_tensors = [
            torch.concat(gate_up_slices(i)).to(torch_dt_mat) for i in range(nlayer)
        ]
        if not transpose_weight:
            for i in range(nlayer):
                self.gate_up_tensors[i] = (
                    self.gate_up_tensors[i]
                    .reshape(ndev, 2 * di // ndev, d)
                    .transpose(1, 2)
                    .contiguous()
                )
        self.gate_up_ptrs = [self.gate_up_tensors[i].data_ptr() for i in range(nlayer)]
        self.ffn_gate_up = (c_void_p * nlayer)(*self.gate_up_ptrs)

        self.ffn_down_tensor = [
            (
                state_dict[naming.down(i)]
                .to(torch_dt_mat)
                .reshape([d, ndev, di // ndev])
                .transpose(0, 1)
                .contiguous()
                if transpose_weight
                else state_dict[naming.down(i)]
                .transpose(0, 1)
                .to(torch_dt_mat)
                .contiguous()
            )
            * scale_down
            for i in range(nlayer)
        ]
        self.ffn_down_ptrs = [self.ffn_down_tensor[i].data_ptr() for i in range(nlayer)]
        self.ffn_down = (c_void_p * nlayer)(*self.ffn_down_ptrs)


class JiugeBatchedTask:
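    """Flattens a batch of InferTask objects into the ctypes arrays that the
    C batch-inference entry points consume."""
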
    def __init__(self, tasks: List[InferTask]):
        self.tasks = tasks
        self.nreq = len(tasks)

        # Precompute fields
        token_lists = [t.tokens for t in tasks]
        self.req_lens_list = [len(toks) for toks in token_lists]
        self.req_pos_list = [t.pos for t in tasks]
        self.kv_cache_ptrs = [t.kvcache().data() for t in tasks]
        self.temperatures_list = [t.temperature for t in tasks]
        self.topks_list = [t.topk for t in tasks]
        self.topps_list = [t.topp for t in tasks]

        # Flatten token lists
        flat_tokens = [tok for toks in token_lists for tok in toks]
        self.ntok = len(flat_tokens)

        # Convert to ctypes arrays in one pass
        self.tokens = (c_uint * self.ntok)(*flat_tokens)
        self.req_lens = (c_uint * self.nreq)(*self.req_lens_list)
        self.req_pos = (c_uint * self.nreq)(*self.req_pos_list)
        self.kv_caches = (POINTER(KVCacheCStruct) * self.nreq)(*self.kv_cache_ptrs)
        self.temperatures = (c_float * self.nreq)(*self.temperatures_list)
        self.topks = (c_uint * self.nreq)(*self.topks_list)
        self.topps = (c_float * self.nreq)(*self.topps_list)

    def input_args(self):
        return (
            self.tokens,
            self.ntok,
            self.req_lens,
            self.nreq,
            self.req_pos,
            self.kv_caches,
            self.temperatures,
            self.topks,
            self.topps,
        )


class JiugeForCausalLM:
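    """Causal LM wrapper that loads Hugging Face checkpoints and runs them
    through the libinfinicore_infer C runtime."""
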
    def __init__(
        self, model_dir_path, device=DeviceType.DEVICE_TYPE_CPU, ndev=1, max_tokens=None
    ):
        def load_all_safetensors_from_dir(dir_path_: str):
            tensors_ = {}
            dir_path_ = Path(dir_path_)
            for file in sorted(dir_path_.glob("*.safetensors")):
                data_ = safetensors.safe_open(file, "pt")
                for name_ in data_.keys():
                    tensors_[name_] = data_.get_tensor(name_)
            return tensors_

        print("Loading model weights to host...")
        load_start_time = time.time()

        with open(os.path.join(model_dir_path, "config.json"), "r") as f:
            config = json.load(f)
            self.config = config
        eos_token_id = self.config["eos_token_id"]
        self.eos_token_id = (
            [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        )
        transpose_weight = (
            device != DeviceType.DEVICE_TYPE_ASCEND
        )  # y = xW is faster than y=xW^T on Ascend

        self.jiuge_model = JiugeModel()

        if "llama" == config["model_type"]:
            model = (
                transformers.LlamaForCausalLM.from_pretrained(model_dir_path)
                .cpu()
                .half()
            )
            self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir_path)
            self.weights = JiugeWeightsImpl(
                self.meta,
                LlamaWeightsNaming(),
                model.state_dict(),
                ndev=ndev,
                transpose_weight=transpose_weight,
            )
        elif "fm9g" == config["model_type"] or "minicpm" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "fm9g7b" == config["model_type"]:
            if any(
                file.suffix == ".safetensors" for file in Path(model_dir_path).iterdir()
            ):
                state_dict = load_all_safetensors_from_dir(model_dir_path)
            else:
                state_dict = torch.load(
                    os.path.join(model_dir_path, "pytorch_model.bin"),
                    weights_only=True,
                    map_location="cpu",
                )
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path, trust_remote_code=True
                )
            else:
                raise ValueError("Unsupported weight naming")
        elif "qwen2" == config["model_type"]:
            state_dict = load_all_safetensors_from_dir(model_dir_path)
            if LlamaWeightsNaming.match(state_dict):
                self.meta = JiugeMetaFromLlama(config, max_tokens=max_tokens)
                self.weights = JiugeWeightsImpl(
                    self.meta,
                    LlamaWeightsNaming(),
                    state_dict,
                    ndev=ndev,
                    transpose_weight=transpose_weight,
                )
                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                    model_dir_path
                )
            else:
                raise ValueError("Unsupported weight naming")
        else:
            raise ValueError("Unsupported model architecture")

        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

        print(f"Creating model on {ndev} devices...")
        load_start_time = time.time()
        self.dev_ids = (c_int * ndev)(*range(ndev))
        self.ndev = ndev
        self.device = device

        self.model_instance = self.jiuge_model.create_model(
            byref(self.meta),
            byref(self.weights),
            device,
            ndev,
            self.dev_ids,
        )
        load_end_time = time.time()
        print(f"Time used: {load_end_time - load_start_time:.3f}s")

    def max_context_len(self):
        return self.meta.dctx

    def create_kv_cache(self):
        return self.jiuge_model.create_kv_cache(
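            # cache geometry: nlayer layers x dctx positions x nkvh KV heads;
            # the per-head K and V dims are passed separately (both dh here)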
            self.meta.nlayer,
            self.meta.dctx,
            self.meta.nkvh,
            self.meta.dh,
            self.meta.dh,
            self.meta.dt_logits,
            self.device,
            self.dev_ids,
            self.ndev,
        )

    def drop_kv_cache(self, kv_cache):
        self.jiuge_model.drop_kv_cache(kv_cache)

    def batch_infer_one_round(self, tasks: List[InferTask]):
        output = (c_uint * len(tasks))()
        batch_inputs = JiugeBatchedTask(tasks)
        self.jiuge_model.infer_batch(
            self.model_instance,
            *(batch_inputs.input_args()),
            output,
        )
        return list(output)

    def generate(self, input_content, max_steps, topp_=1.0, topk_=1, temperature_=1.0):
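        """Chat-style single-prompt generation; streams decoded tokens to stdout
        and returns (output_text, average milliseconds per generation step)."""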
        input_content = self.tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": input_content}],
            add_generation_prompt=True,
            tokenize=False,
        )
        print(input_content, end="", flush=True)
        tokens = self.tokenizer.encode(input_content)
        infer_task = InferTask(
            0,
            tokens,
            self.max_context_len(),
            temperature_,
            topk_,
            topp_,
            self.eos_token_id,
        )
        infer_task.bind_kvcache(KVCache(self))

        steps = 0
        total_time = 0
        output_content = ""

        for step_i in range(max_steps):
            start_time = time.time()
            output_tokens = self.batch_infer_one_round([infer_task])
            end_time = time.time()
            steps += 1
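            # stream-decode the single new token id; the "▁" and "<0x0A>"
            # replacements assume a SentencePiece-style vocabulary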
            output_str = (
                self.tokenizer._tokenizer.id_to_token(output_tokens[0])
                .replace("▁", " ")
                .replace("<0x0A>", "\n")
            )
            output_content += output_str
            print(output_str, end="", flush=True)
            if output_tokens[0] in self.eos_token_id:
                break
            infer_task.next(output_tokens[0])

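            # exclude the first step (prompt prefill) from the timing average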
            if step_i > 0:
                total_time += end_time - start_time

        print("\n")
        avg_time = total_time * 1000 / max(steps - 1, 1)
        print(f"Time per step: {avg_time:.3f}ms")

        infer_task._kv_cache.drop(self)
        return output_content, avg_time

    def perplexity(self, test_sequences: List[Sequence[int]], batch_size=10):
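        """Return corpus perplexity, exp(total NLL / token count), scoring each
        sequence's next-token predictions for all but its first token."""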
        tasks = [
            InferTask(i, [], self.max_context_len(), 1.0, 1, 1.0, self.eos_token_id)
            for i in range(batch_size)
        ]
        kv_caches = [KVCache(self) for _ in range(batch_size)]

        nll = 0.0
        total_len = 0

        for i in range(0, len(test_sequences), batch_size):
            batch_id = 0
            true_tokens = []
            while batch_id < batch_size and batch_id + i < len(test_sequences):
                input_tokens = test_sequences[i + batch_id][:-1]
                true_tokens.extend(test_sequences[i + batch_id][1:])
                tasks[batch_id].tokens = input_tokens
                tasks[batch_id].bind_kvcache(kv_caches[batch_id])
                batch_id += 1

            batch_inputs = JiugeBatchedTask(tasks[:batch_id])
            logits = torch.zeros(
                (batch_inputs.ntok, self.meta.dvoc), dtype=self.meta.torch_dtype_logits
            )
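            # one forward pass over the whole batch; per-token logits are
            # written into the preallocated host buffer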
            self.jiuge_model.forward_batch(
                self.model_instance,
                batch_inputs.tokens,
                batch_inputs.ntok,
                batch_inputs.req_lens,
                batch_inputs.nreq,
                batch_inputs.req_pos,
                batch_inputs.kv_caches,
                logits.data_ptr(),
            )

            logits = logits.float()
            token_ids = torch.tensor(true_tokens, dtype=torch.int64)  # [ntok,]
            log_probs = torch.nn.functional.log_softmax(logits, dim=-1)  # (ntok, vocab)
            token_logprobs = log_probs[
                torch.arange(batch_inputs.ntok), token_ids
            ]  # (ntok,)

            start = 0
            for req_len in batch_inputs.req_lens_list:
                nll += -token_logprobs[start : start + req_len].sum().item()
                start += req_len
            total_len += token_logprobs.numel()

        for task in tasks:
            task.release_kvcache()

        return math.exp(nll / total_len)

    def destroy_model_instance(self):
        self.jiuge_model.destroy_model(self.model_instance)
        print("Model destroyed")


def test():
    if len(sys.argv) < 3:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)
    model_path = sys.argv[2]
    device_type = DeviceType.DEVICE_TYPE_CPU
    if sys.argv[1] == "--cpu":
        device_type = DeviceType.DEVICE_TYPE_CPU
    elif sys.argv[1] == "--nvidia":
        device_type = DeviceType.DEVICE_TYPE_NVIDIA
    elif sys.argv[1] == "--cambricon":
        device_type = DeviceType.DEVICE_TYPE_CAMBRICON
    elif sys.argv[1] == "--ascend":
        device_type = DeviceType.DEVICE_TYPE_ASCEND
    elif sys.argv[1] == "--metax":
        device_type = DeviceType.DEVICE_TYPE_METAX
    elif sys.argv[1] == "--moore":
        device_type = DeviceType.DEVICE_TYPE_MOORE
    elif sys.argv[1] == "--iluvatar":
        device_type = DeviceType.DEVICE_TYPE_ILUVATAR
    elif sys.argv[1] == "--kunlun":
        device_type = DeviceType.DEVICE_TYPE_KUNLUN
    elif sys.argv[1] == "--hygon":
        device_type = DeviceType.DEVICE_TYPE_HYGON
    else:
        print(
            "Usage: python jiuge.py [--cpu | --nvidia| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon] <path/to/model_dir> [n_device]"
        )
        sys.exit(1)

    ndev = int(sys.argv[3]) if len(sys.argv) > 3 else 1
    model = JiugeForCausalLM(model_path, device_type, ndev)
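    # prompt: "What is the highest mountain in Shandong?"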
    model.generate("山东最高的山是?", 500)
    model.destroy_model_instance()


if __name__ == "__main__":
    test()