import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig

# Load the tokenizer of the base Baichuan-13B-Chat model.
tokenizer = AutoTokenizer.from_pretrained(
    "/baichuan/baichuan-13b-chat",
    use_fast=False,
    trust_remote_code=True,
)

# LoRA: load the base model together with the LoRA adapter weights
# from the fine-tuning checkpoint.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(
    "/baichuan/baichuan2_pytorch/fine-tune/output/checkpoint-4",
    trust_remote_code=True,
)

# Full-parameter fine-tuning: load the fine-tuned checkpoint directly instead.
# model = AutoModelForCausalLM.from_pretrained(
#     "/public/home/zhaoying1/work/Baichuan2-main/fine-tune/slurm_script/output/checkpoint-420",
#     device_map="auto",
#     torch_dtype=torch.bfloat16,
#     trust_remote_code=True,
# )

# Reuse the generation settings shipped with the base model.
model.generation_config = GenerationConfig.from_pretrained("/baichuan/baichuan-13b-chat")

messages = []
# Prompt: "Explain 'reviewing the old to learn the new'."
messages.append({"role": "user", "content": "解释一下“温故而知新”"})
response = model.chat(tokenizer, messages)
print(response)
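
# Optional sketch (not part of the original snippet): for deployment it can be
# convenient to fold the LoRA adapter back into the base weights with PEFT's
# merge_and_unload(), so inference no longer needs the PEFT wrapper. The output
# directory below is a hypothetical path; adjust it to your setup.
merged_model = model.merge_and_unload()  # returns a plain transformers model
merged_model.save_pretrained("/baichuan/baichuan2_pytorch/fine-tune/output/merged")
tokenizer.save_pretrained("/baichuan/baichuan2_pytorch/fine-tune/output/merged")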