# llama.py — example script: run chat-style generation with a Llama model.
import sys
import time
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))

import argparse
import infinilm
from infinilm.modeling_utils import get_model_state_dict
from tokenizers import decoders as _dec
from transformers import AutoTokenizer

import infinicore


def get_args():
    """Parse command-line options for the Llama example runner."""
    parser = argparse.ArgumentParser(description="run Llama args")

    # One boolean switch per supported accelerator target.
    for flag in ("cpu", "nvidia", "metax", "moore", "iluvatar"):
        parser.add_argument(
            f"--{flag}",
            action="store_true",
            help=f"Run {flag} test",
        )

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="model_path",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=100,
        help="max_new_tokens",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="python",
        help="python or cpp model",
    )

    return parser.parse_args()


65
66
67
68
69
70
71
72
def test(
    prompt,
    model_path,
    max_new_tokens=100,
    infini_dtype=infinicore.bfloat16,
    infini_device=infinicore.device("cpu", 0),
    backend="python",
):
    """Run one chat-style generation pass and print the total wall time.

    Args:
        prompt: User message fed through the tokenizer's chat template.
        model_path: Directory holding the model weights and tokenizer files.
        max_new_tokens: Cap on the number of generated tokens.
        infini_dtype: infinicore dtype for weights/activations.
        infini_device: infinicore device to run on.
        backend: "python" or "cpp" model implementation.
    """
    # ---------------------------------------------------------------------------- #
    #                        Build the model
    # ---------------------------------------------------------------------------- #
    model = infinilm.AutoLlamaModel.from_pretrained(
        model_path, device=infini_device, dtype=infini_dtype, backend=backend
    )

    # ---------------------------------------------------------------------------- #
    #                        Load the weights
    # ---------------------------------------------------------------------------- #
    model_param_infini = get_model_state_dict(
        model_path,
        device=infini_device,
        dtype=infini_dtype,
    )

    model.load_state_dict(model_param_infini)

    config = model.config

    # ---------------------------------------------------------------------------- #
    #                        Build the tokenizer
    # ---------------------------------------------------------------------------- #
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    if config.model_type == "llama":
        # FIX: this local was previously named `backend`, shadowing the
        # function parameter of the same name — renamed to `tok_backend`.
        tok_backend = getattr(tokenizer, "backend_tokenizer", None)
        target = getattr(tok_backend, "_tokenizer", tok_backend)
        norm = getattr(target, "normalizer", None)
        dec = getattr(target, "decoder", None)
        # Inspect the repr strings (capped to keep this cheap) to detect a
        # Prepend normalizer paired with a Strip decoder; that combination
        # drops leading spaces when decoding, so swap in a decoder chain
        # that preserves them.
        sn = repr(norm)[:800] if norm is not None else ""
        sd = repr(dec)[:800] if dec is not None else ""
        has_prepend = "Prepend" in sn
        has_strip = "Strip" in sd
        if has_prepend and has_strip:
            target.decoder = _dec.Sequence(
                [
                    _dec.Replace("▁", " "),
                    _dec.ByteFallback(),
                    _dec.Fuse(),
                ]
            )

    # ---------------------------------------------------------------------------- #
    #                        Encode the prompt
    # ---------------------------------------------------------------------------- #
    input_content = tokenizer.apply_chat_template(
        conversation=[{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        tokenize=False,
    )
    print(input_content, end="", flush=True)
    input_ids = tokenizer.encode(input_content)

    # ---------------------------------------------------------------------------- #
    #                        Autoregressive generation
    # ---------------------------------------------------------------------------- #
    input_ids_list = [input_ids]  # List: [[1, 1128, 526, 366, 29892]]
    input_ids_infini = infinicore.from_list(input_ids_list)

    t1 = time.time()
    model.generate(
        input_ids_infini,
        max_new_tokens=max_new_tokens,
        device=infini_device,
        tokenizer=tokenizer,
        config=config,
    )
    t2 = time.time()

    print(
        f"total_time: {round((t2 - t1) * 1000, 2)} ms",
    )


if __name__ == "__main__":
    args = get_args()
    print(args)

    # Map each accelerator flag to its runtime device string; the first
    # matching flag (in declaration order) wins.
    flag_devices = (
        ("cpu", "cpu"),
        ("nvidia", "cuda"),
        ("metax", "cuda"),
        ("moore", "musa"),
        ("iluvatar", "cuda"),
    )
    device_str = None
    for flag, dev in flag_devices:
        if getattr(args, flag):
            device_str = dev
            break

    if device_str is None:
        # No device flag supplied — show usage and bail out.
        print(
            "Usage:  python examples/llama.py [--cpu | --nvidia | --metax | --moore | --iluvatar] --model_path=<path/to/model_dir>\n"
            "such as, python examples/llama.py --nvidia --model_path=~/TinyLlama-1.1B-Chat-v1.0"
        )
        sys.exit(1)

    prompt = "山东最高的山是?"

    test(
        prompt,
        args.model_path,
        args.max_new_tokens,
        infini_device=infinicore.device(device_str, 0),
        infini_dtype=infinicore.bfloat16,
        backend=args.backend,
    )