# qwen2flm.py — export a Qwen chat model checkpoint to the fastllm .flm format.
import sys
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from fastllm_pytools import torch2flm

if __name__ == "__main__":
    # Usage: python qwen2flm.py [exportPath] [dtype] [model_path]
    #   exportPath  output .flm file (default: "qwen-7b-<dtype>.flm")
    #   dtype       export precision, e.g. "float16" (default)
    #   model_path  HF model id or local path (default: "Qwen/Qwen-7B-Chat")
    model_path = sys.argv[3] if len(sys.argv) >= 4 else "Qwen/Qwen-7B-Chat"
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    # Load on CPU in fp32 (Qwen-specific `fp32` kwarg); precision conversion
    # to `dtype` happens at export time in torch2flm.tofile.
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map="cpu", trust_remote_code=True, fp32=True).eval()
    # Generation config may specify generation length, top_p and other hyperparameters.
    model.generation_config = GenerationConfig.from_pretrained(model_path, trust_remote_code=True)

    dtype = sys.argv[2] if len(sys.argv) >= 3 else "float16"
    exportPath = sys.argv[1] if len(sys.argv) >= 2 else "qwen-7b-" + dtype + ".flm"
    torch2flm.tofile(exportPath, model, tokenizer, dtype = dtype)