# test.py
import torch
from PIL import Image
from modelscope import AutoModel, AutoTokenizer

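# the first run downloads OpenBMB/MiniCPM-V-2_6 from the ModelScope hub;
# trust_remote_code=True is required because the model ships custom modeling code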
model = AutoModel.from_pretrained('OpenBMB/MiniCPM-V-2_6', trust_remote_code=True,
    attn_implementation='sdpa', torch_dtype=torch.bfloat16)  # 'sdpa' or 'flash_attention_2'; eager attention is not supported
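# bfloat16 assumes a GPU with native bf16 support (e.g. NVIDIA Ampere or newer)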
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('OpenBMB/MiniCPM-V-2_6', trust_remote_code=True)

image = Image.open('xx.jpg').convert('RGB')
question = 'What is in the image?'
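# each chat turn is a dict with a role and a content list that can mix PIL images and text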
msgs = [{'role': 'user', 'content': [image, question]}]

res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(res)

## to stream the output, set sampling=True and stream=True;
## model.chat will then return a generator of text chunks
res = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    stream=True
)

generated_text = ""
for new_text in res:
    generated_text += new_text
    print(new_text, flush=True, end='')
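print()  # finish the streamed output with a trailing newline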