# test.py
import torch
from PIL import Image
from modelscope import AutoModel, AutoTokenizer
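
# Assumption (not in the original script): the MiniCPM-V-2.6 checkpoint is already
# available at ../OpenBMB/MiniCPM-V-2_6, e.g. fetched beforehand with
#   from modelscope import snapshot_download
#   snapshot_download('OpenBMB/MiniCPM-V-2_6', cache_dir='..')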

model = AutoModel.from_pretrained('../OpenBMB/MiniCPM-V-2_6', trust_remote_code=True,
    attn_implementation='sdpa', torch_dtype=torch.bfloat16)  # attn_implementation can be 'sdpa' or 'flash_attention_2'; 'eager' is not supported
model = model.eval().cuda()
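
# Alternative sketch (assumption: the flash-attn package is installed): load the
# model with FlashAttention-2 instead of SDPA.
# model = AutoModel.from_pretrained('../OpenBMB/MiniCPM-V-2_6', trust_remote_code=True,
#     attn_implementation='flash_attention_2', torch_dtype=torch.bfloat16)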
tokenizer = AutoTokenizer.from_pretrained('../OpenBMB/MiniCPM-V-2_6', trust_remote_code=True)

image = Image.open('./icon.png').convert('RGB')
question = 'What is in the image?'
msgs = [{'role': 'user', 'content': [image, question]}]
msgs = [{'role': 'user', 'content': [image, question]}]

# Single-turn chat; the image is passed inside msgs, so image=None here.
res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(res)

## To stream the output, set sampling=True and stream=True;
## model.chat will then return a generator of text chunks.
res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    stream=True
)

generated_text = ""
for new_text in res:
    generated_text += new_text           # accumulate the full reply
    print(new_text, flush=True, end='')  # print each chunk as it arrives
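print()  # finish the streamed output with a trailing newline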