import argparse
import os
import sys
import time

import numpy as np
import transformers
from packaging import version
from tokenizers import decoders as _dec
from transformers import AutoTokenizer

import infinicore

# Make the in-repo infinilm package importable when running from the examples directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))

from infinilm.cache import StaticKVCacheConfig, PagedKVCacheConfig
from infinilm.distributed import DistConfig
from infinilm.infer_engine import GenerationConfig, InferEngine
from infinilm.modeling_utils import load_model_state_dict_by_file


def get_args():
    parser = argparse.ArgumentParser(description="Run the jiuge/Llama inference example")

    parser.add_argument(
        "--cpu",
        action="store_true",
        help="Run cpu test",
    )
    parser.add_argument(
        "--nvidia",
        action="store_true",
        help="Run nvidia test",
    )
    parser.add_argument(
        "--qy",
        action="store_true",
        help="Run qy test",
    )
    parser.add_argument(
        "--metax",
        action="store_true",
        help="Run metax test",
    )
    parser.add_argument(
        "--moore",
        action="store_true",
        help="Run moore test",
    )
    parser.add_argument(
        "--iluvatar",
        action="store_true",
        help="Run iluvatar test",
    )
    parser.add_argument(
        "--cambricon",
        action="store_true",
        help="Run cambricon test",
    )
    parser.add_argument(
        "--ali",
        action="store_true",
        help="Run alippu test",
    )
    parser.add_argument(
        "--hygon",
        action="store_true",
        help="Run hygon test",
    )
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="path to the model directory",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=100,
        help="maximum number of new tokens to generate",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="cpp",
        help="python or cpp model",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=1,
        help="number of prompts in a batch",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="How are you",
        help="input prompt",
    )
    parser.add_argument(
        "--tp",
        type=int,
        default=1,
        help="total rank for tensor parallel",
    )
    parser.add_argument(
        "--enable-paged-attn",
        action="store_true",
        help="use paged cache",
    )
    parser.add_argument(
        "--enable-graph",
        action="store_true",
        help="enable graph compiling",
    )

    parser.add_argument(
        "--top-k",
        type=int,
        default=1,
        help="top k sampling",
    )

    parser.add_argument(
        "--top-p",
        type=float,
        default=1.0,
        help="top p sampling",
    )

    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="sampling temperature",
    )

    return parser.parse_args()


def test(
    prompts: str | list[str],
    model_path,
    max_new_tokens=100,
    infini_device=infinicore.device("cpu", 0),
    tp=1,
    enable_paged_attn=False,
    enable_graph=False,
    top_k=1,
    top_p=1.0,
    temperature=1.0,
):
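    """Build the engine, tokenize the prompts, and run one batched generation.

    `prompts` may be a single string or a list of strings; the total wall-clock
    time of the generate call is printed at the end.
    """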
    model_path = os.path.expanduser(model_path)
    # ---------------------------------------------------------------------------- #
    #                        Create Model
    # ---------------------------------------------------------------------------- #
    model = InferEngine(
        model_path,
        device=infini_device,
        distributed_config=DistConfig(tp),
        enable_graph_compiling=enable_graph,
    )
    # ---------------------------------------------------------------------------- #
    #                        Load Weights
    # ---------------------------------------------------------------------------- #
    load_model_state_dict_by_file(model, model_path, dtype=model.config.dtype)

    # ---------------------------------------------------------------------------- #
    #                        create tokenizer
    # ---------------------------------------------------------------------------- #
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if "llama" == model.config.model_type:
        backend = getattr(tokenizer, "backend_tokenizer", None)
        target = getattr(backend, "_tokenizer", backend)
        norm = getattr(target, "normalizer", None)
        dec = getattr(target, "decoder", None)
        sn = repr(norm)[:800] if norm is not None else ""
        sd = repr(dec)[:800] if dec is not None else ""
        has_prepend = "Prepend" in sn
        has_strip = "Strip" in sd
        if has_prepend and has_strip:
            target.decoder = _dec.Sequence(
                [
                    _dec.Replace("▁", " "),
                    _dec.ByteFallback(),
                    _dec.Fuse(),
                ]
            )

    # ---------------------------------------------------------------------------- #
    #                        tokenize
    # ---------------------------------------------------------------------------- #
    # prompt = "山东最高的山是?"  # "What is the highest mountain in Shandong?"
    if isinstance(prompts, str):
        prompts = [prompts]
    input_contents = [
        tokenizer.apply_chat_template(
            conversation=[{"role": "user", "content": prompt}],
            add_generation_prompt=True,
            tokenize=False,
        )
        for prompt in prompts
    ]
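    # Note: apply_chat_template wraps each prompt in the model's own chat markup
    # before tokenization; the exact text is template-dependent (a Llama-2-style
    # template, for example, produces roughly "<s>[INST] {prompt} [/INST]").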

    # input_ids_list = tokenizer.batch_encode_plus(input_contents)[
    #     "input_ids"
    # ]  # List: [[1, 1128, 526, 366, 29892]]
    if version.parse(transformers.__version__) < version.parse("5.0.0"):
        # Ideally this would be solved by upgrading transformers. However, doing so
        # causes a version mismatch between transformers and the MLU PyTorch build on
        # devices with a Phytium CPU, so a version branch is used temporarily.
        input_ids_list = [
            tokenizer.encode_plus(
                text, truncation=True, max_length=2048, add_special_tokens=True
            )["input_ids"]
            for text in input_contents
        ]
    else:
        input_ids_list = [
            tokenizer._encode_plus(
                text, truncation=True, max_length=2048, add_special_tokens=True
            )["input_ids"]
            for text in input_contents
        ]

    # ---------------------------------------------------------------------------- #
    #                       Create KVCache
    # ---------------------------------------------------------------------------- #
    if enable_paged_attn:
        # Note: `prompts is str` was always False here; use isinstance instead.
        batch_size = 1 if isinstance(prompts, str) else len(prompts)
        max_total_tokens = max_new_tokens + len(input_ids_list[0])
        # Round the per-sequence token budget up to whole 16-token blocks.
        cache_config = PagedKVCacheConfig(
            num_blocks=((max_total_tokens + 15) // 16) * batch_size, block_size=16
        )
    else:
        batch_size = 1 if isinstance(prompts, str) else len(prompts)
        initial_capacity = max_new_tokens + len(input_ids_list[0])
        cache_config = StaticKVCacheConfig(
            max_batch_size=batch_size, max_cache_len=initial_capacity
        )
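    # Sizing sketch (hypothetical numbers): with max_new_tokens=100 and a
    # 28-token prompt, max_total_tokens is 128, so the paged config reserves
    # ceil(128 / 16) = 8 blocks per sequence, while the static config instead
    # pre-allocates a contiguous 128-token cache per sequence.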

    model.reset_cache(cache_config)

    # ---------------------------------------------------------------------------- #
    #                        Generate
    # ---------------------------------------------------------------------------- #
    print(input_contents[0], end="", flush=True)
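    # Pack the per-prompt token-id lists into a single infinicore tensor for generate().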
    input_ids_infini = infinicore.from_list(input_ids_list)

    t1 = time.time()
    print("=================== start generate ====================")
    output_ids = model.generate(
        input_ids_infini,
        GenerationConfig(
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
        ),
        _measure_and_log_time=True,
    )
    t2 = time.time()

    # Decode only the first sequence in the batch (index [0] of each step's output).
    numpy_output_ids = np.array([output_id.to_numpy()[0] for output_id in output_ids])
    print(tokenizer.decode(numpy_output_ids, skip_special_tokens=True))

    print(
        f"total_time: {round((t2 - t1) * 1000, 2)} ms",
    )
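    # A rough throughput figure could be derived here; a minimal sketch, assuming
    # each entry in output_ids corresponds to one generated step:
    #   print(f"throughput: {len(output_ids) / (t2 - t1):.2f} tok/s")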


if __name__ == "__main__":
    args = get_args()
    print(args)

    # Parse command line arguments
    device_str = "cpu"
    if args.cpu:
        device_str = "cpu"
    elif args.nvidia:
        device_str = "cuda"
    elif args.qy:
        device_str = "cuda"
    elif args.metax:
        device_str = "cuda"
    elif args.moore:
        device_str = "musa"
    elif args.iluvatar:
        device_str = "cuda"
    elif args.cambricon:
        device_str = "mlu"
    elif args.ali:
        device_str = "cuda"
    elif args.hygon:
        device_str = "cuda"
    else:
        print(
            "Usage:  python examples/jiuge.py [--cpu | --nvidia | --qy | --metax | --moore | --iluvatar | --cambricon | --ali | --hygon] --model_path=<path/to/model_dir>\n"
            "such as, python examples/jiuge.py --nvidia --model_path=~/TinyLlama-1.1B-Chat-v1.0"
        )
        sys.exit(1)
    prompts = [args.prompt for _ in range(args.batch_size)]

    model_path = args.model_path
    max_new_tokens = args.max_new_tokens
    backend = args.backend
    tp = args.tp
    enable_paged_attn = args.enable_paged_attn
    enable_graph = args.enable_graph
    if backend != "cpp":
        raise ValueError(f"Unsupported backend: {backend!r}; only 'cpp' is supported.")

    infini_device = infinicore.device(device_str, 0)

    test(
        prompts,
        model_path,
        max_new_tokens,
        infini_device=infini_device,
        tp=tp,
        enable_paged_attn=enable_paged_attn,
        enable_graph=enable_graph,
        top_k=args.top_k,
        top_p=args.top_p,
        temperature=args.temperature,
    )