"""Single-turn visual question answering demo for Qwen-VL-Chat.

Loads the Qwen-VL-Chat multimodal model onto a CUDA device and asks one
question about a movie-poster image. Requires network access to pull the
model from the Hugging Face Hub and a CUDA-capable GPU.
"""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

# Set a manual seed if you want reproducible generations.
# torch.manual_seed(1234)

# trust_remote_code=True executes model-repo code from the Hub; only use
# with repositories you trust (flagged for review — this is remote code).
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="cuda", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

# Build a multimodal prompt: one image segment followed by one text segment.
query = tokenizer.from_list_format([
    {'image': 'assets/mm_tutorial/Rebecca_(1939_poster).jpeg'},
    {'text': 'What is the name of the movie in the poster?'},
])

# First turn: history=None starts a fresh conversation; `history` returned
# here carries the context needed for any follow-up turn.
response, history = model.chat(tokenizer, query=query, history=None)
print(response)

# Example follow-up turn (disabled). Moved after the first turn because it
# relies on the `history` produced above.
"""
query = tokenizer.from_list_format([
    {'text': 'Who directed this movie?'},
])
response, history = model.chat(tokenizer, query=query, history=history)
print(response)
"""