import argparse
import os
import sys
import time

import numpy as np

# Put the repo's python/ directory on sys.path before importing infinilm.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))

import infinicore
from transformers import AutoTokenizer
from tokenizers import decoders as _dec

from infinilm.cache import StaticKVCacheConfig, PagedKVCacheConfig
from infinilm.distributed import DistConfig
from infinilm.infer_engine import GenerationConfig, InferEngine
from infinilm.modeling_utils import load_model_state_dict_by_file


def get_args():
    parser = argparse.ArgumentParser(description="Run the jiuge inference example")

    parser.add_argument(
        "--cpu",
        action="store_true",
        help="Run cpu test",
    )
    parser.add_argument(
        "--nvidia",
        action="store_true",
        help="Run nvidia test",
    )
    parser.add_argument(
        "--qy",
        action="store_true",
        help="Run qy test",
    )
    parser.add_argument(
        "--metax",
        action="store_true",
        help="Run metax test",
    )
    parser.add_argument(
        "--moore",
        action="store_true",
        help="Run moore test",
    )
    parser.add_argument(
        "--iluvatar",
        action="store_true",
        help="Run iluvatar test",
    )
    parser.add_argument(
        "--cambricon",
        action="store_true",
        help="Run cambricon test",
    )
    parser.add_argument(
        "--ali",
        action="store_true",
        help="Run alippu test",
    )
    parser.add_argument(
        "--hygon",
        action="store_true",
        help="Run hygon test",
    )
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="model_path",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=100,
        help="max_new_tokens",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="cpp",
        help="inference backend; currently only cpp is supported",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=1,
        help="number of prompts in a batch",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="How are you",
        help="input prompt",
    )
    parser.add_argument(
        "--tp",
        type=int,
        default=1,
        help="number of ranks for tensor parallelism",
    )
    parser.add_argument(
        "--enable-paged-attn",
        action="store_true",
        help="use paged cache",
    )
    parser.add_argument(
        "--enable-graph",
        action="store_true",
        help="enable graph compiling",
    )
    parser.add_argument(
        "--top-k",
        type=int,
        default=1,
        help="top k sampling",
    )

    parser.add_argument(
        "--top-p",
        type=float,
        default=1.0,
        help="top p sampling",
    )

    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="sampling temperature",
    )

    return parser.parse_args()


def test(
    prompts: str | list[str],
    model_path,
    max_new_tokens=100,
    infini_device=infinicore.device("cpu", 0),
    tp=1,
    enable_paged_attn=False,
    enable_graph=False,
    top_k=1,
    top_p=1.0,
    temperature=1.0,
):
    model_path = os.path.expanduser(model_path)
    # ---------------------------------------------------------------------------- #
    #                        Create Model
    # ---------------------------------------------------------------------------- #
    model = InferEngine(
        model_path,
        device=infini_device,
        distributed_config=DistConfig(tp),
        enable_graph_compiling=enable_graph,
    )
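    # Note: DistConfig(tp) sets the tensor-parallel degree; with tp > 1 the
    # engine is expected to shard the model's weights across tp devices
    # (see the --tp flag).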
    # ---------------------------------------------------------------------------- #
    #                        Load Weights
    # ---------------------------------------------------------------------------- #
    load_model_state_dict_by_file(model, model_path, dtype=model.config.dtype)

    # ---------------------------------------------------------------------------- #
    #                        create tokenizer
    # ---------------------------------------------------------------------------- #
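    # trust_remote_code=True allows tokenizer code bundled with the checkpoint
    # to be imported and executed; only enable it for checkpoints you trust.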
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if "llama" == model.config.model_type:
        backend = getattr(tokenizer, "backend_tokenizer", None)
        target = getattr(backend, "_tokenizer", backend)
        norm = getattr(target, "normalizer", None)
        dec = getattr(target, "decoder", None)
        sn = repr(norm)[:800] if norm is not None else ""
        sd = repr(dec)[:800] if dec is not None else ""
        has_prepend = "Prepend" in sn
        has_strip = "Strip" in sd
        if has_prepend and has_strip:
            target.decoder = _dec.Sequence(
                [
                    _dec.Replace("▁", " "),
                    _dec.ByteFallback(),
                    _dec.Fuse(),
                ]
            )
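    # Why the decoder swap: the stock Llama (SentencePiece) decoder ends with a
    # Strip step that removes the leading space encoded by each "▁" marker,
    # which garbles word boundaries when tokens are decoded one at a time. The
    # Replace/ByteFallback/Fuse sequence above reproduces that decoder without
    # the Strip step, so per-token decoding keeps its spaces.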

    # ---------------------------------------------------------------------------- #
    #                        tokenize
    # ---------------------------------------------------------------------------- #
    # prompt = "山东最高的山是?"
    if isinstance(prompts, str):
        prompts = [prompts]
    input_contents = [
        tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": prompt}],
            add_generation_prompt=True,
            tokenize=False,
        )
        for prompt in prompts
    ]
    # Encode each rendered prompt; calling the tokenizer directly is the
    # public equivalent of encode_plus.
    input_ids_list = [
        tokenizer(
            text,
            truncation=True,
            max_length=2048,
            add_special_tokens=True,
        )["input_ids"]
        for text in input_contents
    ]
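    # Note: the 2048-token truncation cap is hard-coded for this example; a
    # model-aware script would derive it from the model's context length.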

    # ---------------------------------------------------------------------------- #
    #                       Create KVCache
    # ---------------------------------------------------------------------------- #
    # `prompts` was normalized to a list above, so its length is the batch size.
    batch_size = len(prompts)
    # Budget enough cache for the longest prompt plus all generated tokens.
    max_total_tokens = max_new_tokens + max(len(ids) for ids in input_ids_list)
    if enable_paged_attn:
        cache_config = PagedKVCacheConfig(
            num_blocks=((max_total_tokens + 15) // 16) * batch_size, block_size=16
        )
    else:
        cache_config = StaticKVCacheConfig(
            max_batch_size=batch_size, max_cache_len=max_total_tokens
        )
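    # Worked example: with max_new_tokens=100 and a 28-token prompt,
    # max_total_tokens = 128, so the paged config needs (128 + 15) // 16 = 8
    # blocks of 16 tokens per sequence; a batch of 4 allocates 32 blocks.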

    model.reset_cache(cache_config)

    # ---------------------------------------------------------------------------- #
    #                        Generate
    # ---------------------------------------------------------------------------- #
    print(input_contents[0], end="", flush=True)
    input_ids_infini = infinicore.from_list(input_ids_list)

    t1 = time.time()
    print("=================== start generate ====================")
    output_ids = model.generate(
        input_ids_infini,
        GenerationConfig(
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
        ),
        _measure_and_log_time=True,
    )
    t2 = time.time()

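    # The decode step below assumes model.generate returned one tensor per
    # generated token; to_numpy()[0] selects batch element 0, so only the
    # first sequence's completion is printed.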
    numpy_output_ids = np.array([output_id.to_numpy()[0] for output_id in output_ids])
    print(tokenizer.decode(numpy_output_ids, skip_special_tokens=True))

    print(
        f"total_time: {round((t2 - t1) * 1000, 2)} ms",
    )
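    # Optional rough throughput figure (prefill and decode are not separated):
    # print(f"throughput: {len(output_ids) / (t2 - t1):.2f} tokens/s")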


if __name__ == "__main__":
    args = get_args()
    print(args)

    # Map the selected hardware flag to a device string.
    device_str = "cpu"
    if args.cpu:
        device_str = "cpu"
    elif args.nvidia:
        device_str = "cuda"
    elif args.qy:
        device_str = "cuda"
    elif args.metax:
        device_str = "cuda"
    elif args.moore:
        device_str = "musa"
    elif args.iluvatar:
        device_str = "cuda"
    elif args.cambricon:
        device_str = "mlu"
    elif args.ali:
        device_str = "cuda"
    elif args.hygon:
        device_str = "cuda"
    else:
        print(
            "Usage:  python examples/jiuge.py [--cpu | --nvidia | --qy | --metax | --moore | --iluvatar | --cambricon | --ali | --hygon] --model_path=<path/to/model_dir>\n"
            "e.g., python examples/jiuge.py --nvidia --model_path=~/TinyLlama-1.1B-Chat-v1.0"
        )
        sys.exit(1)
    prompts = [args.prompt for _ in range(args.batch_size)]

    model_path = args.model_path
    max_new_tokens = args.max_new_tokens
    backend = args.backend
    tp = args.tp
    enable_paged_attn = args.enable_paged_attn
    enable_graph = args.enable_graph
    if backend != "cpp":
        raise ValueError(f"Unsupported backend: {backend}. Only 'cpp' is supported.")

    infini_device = infinicore.device(device_str, 0)

    test(
        prompts,
        model_path,
        max_new_tokens,
        infini_device=infini_device,
        tp=tp,
        enable_paged_attn=enable_paged_attn,
        enable_graph=enable_graph,
        top_k=args.top_k,
        top_p=args.top_p,
        temperature=args.temperature,
    )