import math
import tempfile

import librosa
import numpy as np
import soundfile as sf
import torch
from moviepy.editor import VideoFileClip
from PIL import Image

from modelscope import AutoModel, AutoTokenizer

# Load the omni model; init_vision/init_audio/init_tts all default to True.
# For a vision-only model, set init_audio=False and init_tts=False.
# For an audio-only model, set init_vision=False.
model = AutoModel.from_pretrained(
    'MiniCPM-V/MiniCPM-o-2_6',
    trust_remote_code=True,
    attn_implementation='sdpa',  # sdpa or flash_attention_2
    torch_dtype=torch.bfloat16,
    init_vision=True,
    init_audio=True,
    init_tts=True
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('MiniCPM-V/MiniCPM-o-2_6', trust_remote_code=True)

# Unless running in vision-only mode, the TTS processor and the Vocos vocoder
# also need to be initialized.
model.init_tts()


def get_video_chunk_content(video_path, flatten=True):
    """Split a video into 1-second units of ("<unit>" tag, frame, audio chunk)."""
    video = VideoFileClip(video_path)
    print('video_duration:', video.duration)

    # Extract the audio track as 16 kHz mono PCM, then load it as a numpy array.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as temp_audio_file:
        temp_audio_file_path = temp_audio_file.name
        video.audio.write_audiofile(temp_audio_file_path, codec="pcm_s16le", fps=16000)
        audio_np, sr = librosa.load(temp_audio_file_path, sr=16000, mono=True)
    num_units = math.ceil(video.duration)

    # Each unit pairs 1 frame with the matching 1-second audio chunk.
    contents = []
    for i in range(num_units):
        frame = video.get_frame(i + 1)  # sample the frame at the end of the unit
        image = Image.fromarray(frame.astype(np.uint8))
        audio = audio_np[sr * i: sr * (i + 1)]
        if flatten:
            contents.extend(["<unit>", image, audio])
        else:
            contents.append(["<unit>", image, audio])

    return contents


video_path = "./MiniCPM-o-2_6/assets/Skiing.mp4"

# To use a voice-clone prompt, pass a reference audio clip to get_sys_prompt.
ref_audio_path = "./MiniCPM-V/MiniCPM-o-2_6/assets/demo.wav"
ref_audio, _ = librosa.load(ref_audio_path, sr=16000, mono=True)
sys_msg = model.get_sys_prompt(ref_audio=ref_audio, mode='omni', language='en')
# Or use the default prompt:
# sys_msg = model.get_sys_prompt(mode='omni', language='en')

contents = get_video_chunk_content(video_path)
msg = {"role": "user", "content": contents}
msgs = [sys_msg, msg]

# Set generate_audio=True and output_audio_path to save the TTS result.
generate_audio = True
output_audio_path = 'output.wav'

res = model.chat(
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.5,
    max_new_tokens=4096,
    omni_input=True,  # omni_input=True is required for omni (mixed-modality) inference
    use_tts_template=True,
    generate_audio=generate_audio,
    output_audio_path=output_audio_path,
    max_slice_nums=1,
    use_image_id=False,
    return_dict=True
)
print(res)
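
# ---------------------------------------------------------------------------
# Optional follow-up (a minimal sketch, not part of the original example):
# assuming the chat call above saved the TTS result to output_audio_path,
# the generated file can be inspected with the soundfile package imported
# earlier, e.g. to confirm its duration and sample rate.
if generate_audio:
    audio_out, sr_out = sf.read(output_audio_path)  # returns (data, samplerate)
    print(f"TTS output: {len(audio_out) / sr_out:.2f}s at {sr_out} Hz")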