# hf_model.py
from fastllm_pytools import llm
import torch
import ctypes
import numpy as np
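
# Integer codes below are consumed by the fastllm C library when weights are
# exported (they mirror fastllm's internal data-type / weight-type enums;
# 0 is used for float32).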
fastllm_data_type_dict = {
    "int4": 8,
    "int8": 3,
    "float16": 7
}
fastllm_weight_type_dict = {
    "linear": 1,
    "embedding": 2,
    "QuantizedLinear": 111
}

def create(model,
           tokenizer=None,
           pre_prompt=None,
           user_role=None,
           bot_role=None,
           history_sep=None,
           dtype="float16"):
    if dtype not in fastllm_data_type_dict:
        print("dtype should be in ", list(fastllm_data_type_dict.keys()))
        exit(0)

    # 0.1 model info
    # if model.config.model_type == "chatglm" and model.config.transformers_version == "4.30.2":
    #    model.config.model_type = "chatglm3"
    #    print("model.config.model_type: chatglm3!")
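
    # start from the HF config and overlay generation_config; chat-format
    # fields (pre_prompt / user_role / bot_role / history_sep) may be
    # overridden below for specific model families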
    modelInfo = model.config.__dict__
    if model.generation_config is not None:
        modelInfo.update(model.generation_config.__dict__)
    if pre_prompt:
        modelInfo["pre_prompt"] = pre_prompt
    if user_role:
        modelInfo["user_role"] = user_role
    if bot_role:
        modelInfo["bot_role"] = bot_role
    if history_sep:
        modelInfo["history_sep"] = history_sep
    if modelInfo["model_type"] == "baichuan" and hasattr(model, "model") and hasattr(model.model, "get_alibi_mask"):
        # Baichuan 2: uses alibi position encoding and fixed role tokens
        modelInfo["use_alibi"] = "1"
        modelInfo["pre_prompt"] = ""
        modelInfo["user_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.user_token_id) + "> ") if hasattr(model.generation_config, "user_token_id") else ""
        modelInfo["bot_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.assistant_token_id) + ">") if hasattr(model.generation_config, "assistant_token_id") else ""
        modelInfo["history_sep"] = ""
    if modelInfo["model_type"] == "qwen":
        if modelInfo["chat_format"] == "chatml":
            modelInfo["im_end_id"] = tokenizer.im_end_id
            modelInfo["im_start_id"] = tokenizer.im_start_id
    if modelInfo["model_type"] == "chatglm" and hasattr(tokenizer, "build_chat_input"):
        # chatglm3
        modelInfo["pre_prompt"] = ""
        modelInfo["user_role"] = ("<FLM_FIX_TOKEN_" + str(tokenizer.get_command("<|user|>")) + "> \n")
        modelInfo["bot_role"] = ("<FLM_FIX_TOKEN_" + str(tokenizer.get_command("<|assistant|>")) + ">")
        modelInfo["history_sep"] = ""

    modelInfo["tokenizer_use_score"] = "1" # 分词带分数
    weight_type_dict = {}
    module_dict = {}
    weight_bits = {}
    for key, m in model.named_modules():
        if str(type(m)).find("QuantizedLinear") != -1:
            weight_type_dict[key + ".weight"] = "QuantizedLinear"
            weight_bits[key + ".weight"] = m.weight_bit_width
        if isinstance(m, torch.nn.Linear):
            weight_type_dict[key + ".weight"] = "linear"
            module_dict[key + ".weight"] = m
        if isinstance(m, torch.nn.Embedding):
            weight_type_dict[key] = "embedding"

    peft_config = {}
    active_adapter = ""
    if hasattr(model, "peft_config"):
        peft_config = model.peft_config
    if hasattr(model, "active_adapter") and isinstance(model.active_adapter, str):
        # in transformers >= 4.33.0, active_adapter is a function on the model; ignore it for now
        active_adapter = model.active_adapter

    model = model.cpu()
    state_dict = model.state_dict()
    model_type = model.config.__dict__["model_type"]
    # from here on, `model` holds the native fastllm model handle
    model = llm.fastllm_lib.create_empty_llm_model(model_type.encode())
    for it in modelInfo.keys():
        llm.fastllm_lib.add_dict_llm_model(model, str(it).encode(), str(modelInfo[it]).encode())
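
    # export LoRA/PEFT adapter configs collected earlier and activate the
    # selected adapter, if any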
    for adapter_name in peft_config.keys():
        adapter_dict = peft_config[adapter_name].__dict__
        for it in adapter_dict.keys():
            llm.fastllm_lib.add_adapter_dict_llm_model(model, str(adapter_name).encode(), str(it).encode(), str(adapter_dict[it]).encode())
    if len(active_adapter) != 0:
        llm.fastllm_lib.set_adapter(model, str(active_adapter).encode())

    # 1. vocab
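    # sentencepiece tokenizers are exported piece-by-piece with their real
    # scores; other tokenizers fall back to get_vocab() with a constant score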
    if tokenizer:
        if hasattr(tokenizer, "tokenizer") and modelInfo["model_type"] != "qwen":
            # unwrap the inner tokenizer (qwen keeps its wrapper)
            tokenizer = tokenizer.tokenizer
        if hasattr(tokenizer, "sp_model"):
            piece_size = tokenizer.sp_model.piece_size()
            for i in range(piece_size):
                llm.fastllm_lib.add_tokenizer_word_llm_model(model, tokenizer.sp_model.id_to_piece(i).encode(),
                                                             i, ctypes.c_float(tokenizer.sp_model.get_score(i)))
        else:
            vocab = tokenizer.get_vocab()
            for v in vocab.keys():
                if modelInfo["model_type"] == "moss":
                    vv = [(ord(c) if c not in tokenizer.byte_decoder else tokenizer.byte_decoder[c]) for c in v]
                    llm.fastllm_lib.add_tokenizer_word_llm_model(model, vv, vocab[v], ctypes.c_float(1.0))
                elif modelInfo["model_type"] == "qwen":
                    llm.fastllm_lib.add_tokenizer_word_llm_model(model, v, vocab[v], ctypes.c_float(1.0))
                else:
                    llm.fastllm_lib.add_tokenizer_word_llm_model(model, v.encode(), vocab[v], ctypes.c_float(1.0))
    tot = 0
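
    # 2. weight
    # export every tensor in the state dict; linear weights are quantized or
    # cast according to `dtype`, embeddings are kept as float32 for now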
    for key in state_dict:
        ori_data_type = 0
        ori_np_data_type = np.float32
        cur_weight_type = 0
        if key in weight_type_dict and weight_type_dict[key] in fastllm_weight_type_dict:
            cur_weight_type = fastllm_weight_type_dict[weight_type_dict[key]]
        to_data_type = 0

        if cur_weight_type == 1:
            # linear weights are converted to the requested dtype
            to_data_type = fastllm_data_type_dict[dtype]
            if to_data_type == 7:
                # float16 weights can be passed through without a float32 round-trip
                ori_data_type = 7
                ori_np_data_type = np.float16
        elif cur_weight_type == 2:
            # TODO: bfloat; embeddings are exported as float32 for now
            to_data_type = 0

        weight_name = key
        if peft_config:
            # strip the PEFT wrapper prefix so names match the base model
            weight_name = weight_name.replace('base_model.model.', '')
        if cur_weight_type == 111:
            # ChatGLM-style QuantizedLinear: pass quantized weights plus per-channel scales
            llm.fastllm_lib.add_qlinear_weight_llm_model(model, weight_name.encode(),
                                                 len(state_dict[key].shape),
                                                 (ctypes.c_int * len(state_dict[key].shape))(*list(state_dict[key].shape)),
                                                 weight_bits[key],
                                                 state_dict[key + "_scale"].numpy().astype(np.float32).ctypes.data_as(ctypes.c_void_p),
                                                 state_dict[key].numpy().ctypes.data_as(ctypes.c_void_p))
        else:
            llm.fastllm_lib.add_weight_llm_model(model, weight_name.encode(),
                                             len(state_dict[key].shape),
                                             (ctypes.c_int * len(state_dict[key].shape))(*list(state_dict[key].shape)),
                                             to_data_type, cur_weight_type, ori_data_type,
                                             state_dict[key].numpy().astype(ori_np_data_type).ctypes.data_as(ctypes.c_void_p))
        tot += 1
        print("convert (", tot, "/", len(state_dict), end=" )\r")

    print("");
    llm.fastllm_lib.init_params_llm_model(model);
    llm.fastllm_lib.warmup_llm_model(model);
    ret = llm.model("", id = model);
    return ret;
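
# Minimal usage sketch (the checkpoint name is illustrative; any HF model
# that fastllm supports works the same way):
#   from transformers import AutoModel, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
#   hf_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).eval()
#   model = create(hf_model, tokenizer, dtype="int8")
#   print(model.response("Hello"))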