FROM image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.3.0-py3.10-dtk24.04.3-ubuntu20.04
# MiniCPM-o 2.6
## Paper
[MiniCPM-o 2.6: A GPT-4o Level MLLM for Vision, Speech, and Multimodal Live Streaming on Your Phone](https://openbmb.notion.site/MiniCPM-o-2-6-A-GPT-4o-Level-MLLM-for-Vision-Speech-and-Multimodal-Live-Streaming-on-Your-Phone-185ede1b7a558042b5d5e45e6b237da9)
## Model Architecture
The overall architecture of MiniCPM-o 2.6 for end-to-end omni-modal modeling. The model is built on SigLip-400M, Whisper-medium-300M, ChatTTS-200M, and Qwen2.5-7B-Instruct, with 8B parameters in total. The overall framework is shown below.
<div align=center>
<img src="./Pic/arch.png"/>
</div>
## Algorithm
The omni-modal live streaming mechanism consists of (1) turning the speech encoder and decoder into online streaming components and (2) enabling the LLM backbone to process parallel multimodal stream information. MiniCPM-o 2.6 splits the input audio into chunks, where a chunk is a fixed number of audio tokens corresponding to one second of audio. During audio encoding, each chunk is encoded with causal attention over itself and the preceding chunks, which (1) satisfies the online streaming encoding requirement while (2) keeping information loss minimal compared with offline whole-sequence encoding. For streaming speech generation, a fixed number of text tokens (a chunk of size n) is prefilled at each step, and the decoder immediately decodes a fixed number of audio tokens (a chunk of size m); the process then repeats for the next text and audio chunks, and so on. Note that the alignment between text chunks and their corresponding audio chunks is not exact, so in practice a larger budget is reserved for the text-token chunk size. An illustrative sketch of this interleaved decoding loop follows the figure below.
<div align=center>
<img src="./Pic/theory.png"/>
</div>
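The interleaved text/audio decoding loop described above can be sketched as follows. This is only an illustration of the mechanism, not the actual MiniCPM-o implementation or API; `lm_decode_text_chunk`, `tts_decode_audio_chunk`, and the chunk sizes are hypothetical placeholders.
```
# Illustrative sketch only (not the real MiniCPM-o API): streaming speech generation
# by alternating between prefilling a text chunk of size n and decoding an audio
# chunk of size m, conditioned on everything generated so far.
def stream_generate(lm_decode_text_chunk, tts_decode_audio_chunk,
                    n_text=10, m_audio=50, max_steps=100):
    text_tokens, audio_tokens = [], []
    for _ in range(max_steps):
        # 1) the LLM backbone emits the next chunk of up to n_text text tokens
        new_text = lm_decode_text_chunk(text_tokens, n=n_text)
        if not new_text:  # text generation finished
            break
        text_tokens += new_text
        # 2) the streaming speech decoder is prefilled with the new text chunk and
        #    immediately decodes a fixed-size chunk of m_audio audio tokens
        audio_tokens += tts_decode_audio_chunk(text_tokens, audio_tokens, m=m_audio)
    return text_tokens, audio_tokens
```
Because the alignment between text and audio chunks is inexact, the text-chunk budget n is kept larger than strictly needed, as noted above.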
## Environment Setup
### Docker (Option 1)
Running inside Docker is recommended. The address and steps for pulling the Docker image from [光源](https://www.sourcefind.cn/#/service-details) are given below.
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.3.0-py3.10-dtk24.04.3-ubuntu20.04
docker run -it --shm-size=1024G -v /path/your_code_data/:/path/your_code_data/ -v /opt/hyhal:/opt/hyhal --network=host --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name minicpm_o <your IMAGE ID> bash # replace <your IMAGE ID> with the ID of the image pulled above
git clone http://developer.sourcefind.cn/codes/modelzoo/minicpm-o-2.6_pytorch.git
cd /path/your_code_data/
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
```
Tips: the DTK driver, Python, Torch, and other DCU-related tool versions above must match each other exactly.
### Dockerfile (Option 2)
How to build and run with the provided Dockerfile:
```
docker build -t minicpm_o:latest .
docker run --shm-size 500g --network=host --name=minicpm_o --privileged --device=/dev/kfd --device=/dev/dri --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v <absolute path to project>:/home/ -v /opt/hyhal:/opt/hyhal:ro -it minicpm_o:latest bash
git clone http://developer.sourcefind.cn/codes/modelzoo/minicpm-o-2.6_pytorch.git
cd /path/your_code_data/
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### Anaconda (Option 3)
Detailed steps for local configuration and compilation, for example:
The special deep-learning libraries required for the DCU GPUs used by this project can be downloaded and installed from the [光合](https://developer.hpccube.com/tool/) developer community.
```
DTK driver: dtk24.04.3
python: 3.10
torch: 2.3.0
transformers: 4.48.3
```
`Tips: the DTK driver, Python, Torch, and other DCU-related tool versions above must match each other exactly.`
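As an optional sanity check (a small helper added here for illustration, not part of the repository), the snippet below prints the detected versions so they can be compared against the list above:
```
import torch
import transformers

# These should match the versions listed above (torch 2.3.0, transformers 4.48.3).
print("torch:", torch.__version__)
print("transformers:", transformers.__version__)
print("DCU/GPU visible to torch:", torch.cuda.is_available())
```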
Other non-deep-learning libraries are installed from requirements.txt:
```
cd /path/your_code_data/
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
```
## Dataset
The dataset bundled with ms-swift: AI-ModelScope/LaTeX_OCR:human_handwrite#20000
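To inspect a few samples locally before training, the dataset can also be loaded through the ModelScope SDK. This is a minimal sketch assuming the standard `MsDataset` API and the `human_handwrite` subset named in the dataset spec above:
```
from modelscope.msdatasets import MsDataset

# Load the human_handwrite subset of AI-ModelScope/LaTeX_OCR from the ModelScope hub
ds = MsDataset.load('AI-ModelScope/LaTeX_OCR', subset_name='human_handwrite', split='train')
for i, sample in enumerate(ds):
    # each sample pairs a handwritten-formula image with its LaTeX source text
    print(sample)
    if i >= 2:
        break
```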
## Training
Fine-tune with the ms-swift framework:
```
git clone https://github.com/modelscope/ms-swift.git
cd ms-swift
pip install -e . -i https://pypi.tuna.tsinghua.edu.cn/simple
```
### Single node, multi-GPU
`sh finetune.sh`
## Inference
### Single node, multi-GPU
Audio inference:
```
CUDA_VISIBLE_DEVICES=0,1,2,3 python minicpm-o_audio.py
```
Vision inference:
```
CUDA_VISIBLE_DEVICES=0,1,2,3 python minicpm-o_version.py
```
## Results
- Audio inference
<div align=left>
<img src="./Pic/result1.png"/>
</div>
- Vision inference
<div align=left>
<img src="./Pic/result2.png"/>
</div>
### Accuracy
## Application Scenarios
### Algorithm Category
`Dialogue / Q&A`
### Key Application Industries
`Research, Education, Government, Finance`
## Pretrained Weights
Quick download center for pretrained weights: [SCNet AIModels](https://www.scnet.cn/ui/aihub/models)
The pretrained weights used in this project can be downloaded through the quick download channel: [openbmb/MiniCPM-o-2_6](https://gitlab.scnet.cn:9002/model/sugon_scnet/InfiniteYou.git)
HF/GitHub download address: [openbmb/MiniCPM-o-2_6](https://huggingface.co/openbmb/MiniCPM-o-2_6)
ModelScope download path:
- [openbmb/MiniCPM-o-2_6 on ModelScope](https://www.modelscope.cn/models/OpenBMB/MiniCPM-o-2_6/files)
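The weights can also be fetched programmatically through the ModelScope SDK; a minimal sketch assuming the standard `snapshot_download` helper and the default cache directory:
```
from modelscope import snapshot_download

# Download openbmb/MiniCPM-o-2_6 into the local ModelScope cache and print the path;
# point AutoModel.from_pretrained at this directory (the scripts below use a local
# 'MiniCPM-V/MiniCPM-o-2_6' path).
model_dir = snapshot_download('OpenBMB/MiniCPM-o-2_6')
print(model_dir)
```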
## Source Repository and Issue Feedback
- http://developer.sourcefind.cn/codes/modelzoo/minicpm-o-2.6_pytorch.git
## References
- https://github.com/OpenBMB/MiniCPM-o
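# Contents of the LoRA fine-tuning command (presumably finetune.sh, invoked in the Training section above).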
CUDA_VISIBLE_DEVICES=1,2,3,5 \
swift sft \
--model "MiniCPM-V/MiniCPM-o-2_6/" \
--dataset AI-ModelScope/LaTeX_OCR:human_handwrite#20000 \
--train_type lora \
--torch_dtype bfloat16 \
--num_train_epochs 0.1 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--learning_rate 1e-4 \
--lora_rank 8 \
--lora_alpha 32 \
--target_modules all-linear \
--freeze_vit true \
--gradient_accumulation_steps 16 \
--eval_steps 50 \
--save_steps 50 \
--save_total_limit 5 \
--logging_steps 5 \
--max_length 2048 \
--output_dir output \
--warmup_ratio 0.05 \
--dataloader_num_workers 4
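# Omni (video + audio) streaming inference example, presumably the minicpm-o_audio.py script referenced in the Inference section above.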
import math
import numpy as np
from PIL import Image
from moviepy.editor import VideoFileClip
import tempfile
import librosa
import soundfile as sf
import torch
from modelscope import AutoModel, AutoTokenizer
# Load the full omni model by default; init_vision/init_audio/init_tts all default to True.
# To load a vision-only model, set init_audio=False and init_tts=False.
# To load an audio-only model, set init_vision=False.
model = AutoModel.from_pretrained(
    'MiniCPM-V/MiniCPM-o-2_6',
    trust_remote_code=True,
    attn_implementation='sdpa',  # sdpa or flash_attention_2
    torch_dtype=torch.bfloat16,
    init_vision=True,
    init_audio=True,
    init_tts=True
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('MiniCPM-V/MiniCPM-o-2_6', trust_remote_code=True)
# In addition to the vision-only mode, the TTS processor and vocoder (vocos) also need to be initialized
model.init_tts()
def get_video_chunk_content(video_path, flatten=True):
    video = VideoFileClip(video_path)
    print('video_duration:', video.duration)
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as temp_audio_file:
        temp_audio_file_path = temp_audio_file.name
        video.audio.write_audiofile(temp_audio_file_path, codec="pcm_s16le", fps=16000)
        audio_np, sr = librosa.load(temp_audio_file_path, sr=16000, mono=True)
    num_units = math.ceil(video.duration)
    # 1 frame + 1s audio chunk
    contents = []
    for i in range(num_units):
        frame = video.get_frame(i + 1)
        image = Image.fromarray((frame).astype(np.uint8))
        audio = audio_np[sr * i:sr * (i + 1)]
        if flatten:
            contents.extend(["<unit>", image, audio])
        else:
            contents.append(["<unit>", image, audio])
    return contents
video_path="./MiniCPM-o-2_6/assets/Skiing.mp4"
# to use a voice-cloning prompt, set ref_audio
ref_audio_path = "./MiniCPM-V/MiniCPM-o-2_6/assets/demo.wav"
ref_audio, _ = librosa.load(ref_audio_path, sr=16000, mono=True)
sys_msg = model.get_sys_prompt(ref_audio=ref_audio, mode='omni', language='en')
# or use default prompt
# sys_msg = model.get_sys_prompt(mode='omni', language='en')
contents = get_video_chunk_content(video_path)
msg = {"role":"user", "content": contents}
msgs = [sys_msg, msg]
# please set generate_audio=True and output_audio_path to save the tts result
generate_audio = True
output_audio_path = 'output.wav'
res = model.chat(
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.5,
    max_new_tokens=4096,
    omni_input=True,  # set omni_input=True for omni (mixed-modality) inference
    use_tts_template=True,
    generate_audio=generate_audio,
    output_audio_path=output_audio_path,
    max_slice_nums=1,
    use_image_id=False,
    return_dict=True
)
print(res)
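# Vision (image) inference example, presumably the minicpm-o_version.py script referenced in the Inference section above.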
import torch
from PIL import Image
from modelscope import AutoModel, AutoTokenizer
# Load the full omni model by default; init_vision/init_audio/init_tts all default to True.
# To load a vision-only model, set init_audio=False and init_tts=False.
# To load an audio-only model, set init_vision=False.
model = AutoModel.from_pretrained(
    'MiniCPM-V/MiniCPM-o-2_6',
    trust_remote_code=True,
    attn_implementation='sdpa',  # sdpa or flash_attention_2
    torch_dtype=torch.bfloat16,
    init_vision=True,
    init_audio=True,
    init_tts=True
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('MiniCPM-V/MiniCPM-o-2_6', trust_remote_code=True)
# In addition to the vision-only mode, the TTS processor and vocoder (vocos) also need to be initialized
model.init_tts()
# test.py
image = Image.open('../images/XXXX.jpg').convert('RGB')
question = 'ocr this image?'
msgs = [{'role': 'user', 'content': [image, question]}]
res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(res)
# Model unique identifier
modelCode=1483
# Model name
modelName=MiniCPM-o 2.6_pytorch
# Model description
modelDescription=MiniCPM-o 2.6 is the latest and best-performing model in the MiniCPM-o series. With 8B total parameters, its vision, speech, and multimodal streaming capabilities reach the GPT-4o level.
# Application scenarios
appScenario=Inference,Dialogue Q&A,Research,Education,Government,Finance
# Framework type
frameType=Pytorch