Commit 0ff1156e authored by zhouxiang

Refine

parent 6ab9856b
@@ -5,7 +5,7 @@ from transformers.generation.utils import GenerationConfig
 from fastllm_pytools import torch2flm
 if __name__ == "__main__":
-    modelpath = "baichuan-inc/Baichuan-13B-Chat""
+    modelpath = "baichuan-inc/Baichuan-13B-Chat"
     tokenizer = AutoTokenizer.from_pretrained(modelpath, trust_remote_code=True)
     model = AutoModelForCausalLM.from_pretrained(modelpath, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True)
     model.to("cpu")
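The hunk only shows the model-loading part of the script; the imports above it and the export call below it are collapsed in this view. For context, a complete conversion script along these lines might look roughly like the sketch below. The output filename, the generation_config assignment, and the torch2flm.tofile call are assumptions based on the usual fastllm_pytools conversion pattern, not part of this commit.

# Sketch of a full HF -> flm conversion script for Baichuan-13B-Chat.
# The export filename and the torch2flm.tofile call are assumed from the
# typical fastllm_pytools workflow and are not shown in this commit's hunk.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
from fastllm_pytools import torch2flm

if __name__ == "__main__":
    modelpath = "baichuan-inc/Baichuan-13B-Chat"
    tokenizer = AutoTokenizer.from_pretrained(modelpath, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        modelpath, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True
    )
    # Assumed: load the model's generation config, as in the standard Baichuan setup.
    model.generation_config = GenerationConfig.from_pretrained(modelpath)
    model.to("cpu")  # move weights to CPU before serializing
    # Write the .flm file; the output path here is illustrative only.
    torch2flm.tofile("baichuan-13b-chat-fp16.flm", model, tokenizer)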