import os
import requests                # used to fetch sample images in the image examples
import torch
from PIL import Image          # image input (used by the vision examples)
import soundfile               # audio input (used by the speech examples)
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig

# Local path to the downloaded Phi-4-multimodal-instruct checkpoint.
model_path = '/home/wanglch/Phi4/Phi-4-multimodal-instruct/'

# trust_remote_code is required: the checkpoint ships its own modeling code.
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype='auto',
    _attn_implementation='flash_attention_2',
).cuda()

# Generation settings shipped with the checkpoint.
generation_config = GenerationConfig.from_pretrained(model_path, 'generation_config.json')

# Chat-format special tokens expected by Phi-4-multimodal-instruct.
user_prompt = '<|user|>'
assistant_prompt = '<|assistant|>'
prompt_suffix = '<|end|>'

#################################################### text-only ####################################################
prompt = f'{user_prompt}what is the answer for 1+1? Explain it.{prompt_suffix}{assistant_prompt}'
print(f'>>> Prompt\n{prompt}')

inputs = processor(prompt, images=None, return_tensors='pt').to('cuda:0')
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    generation_config=generation_config,
)

# Drop the prompt tokens so that only the newly generated continuation is decoded.
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]
print(f'>>> Response\n{response}')
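
# Note (an addition, not part of the original script): _attn_implementation set to
# 'flash_attention_2' requires the flash-attn package and a GPU that supports it.
# If flash-attn is unavailable, a minimal fallback sketch, assuming the model's
# remote code also supports the standard attention path, is to load with:
#
#   model = AutoModelForCausalLM.from_pretrained(
#       model_path,
#       trust_remote_code=True,
#       torch_dtype='auto',
#       _attn_implementation='eager',
#   ).cuda()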