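# OpenCompass config: evaluate Llama-2-7b-chat-hf on ARC-c and ARC-e using
# LMDeploy's TurboMind backend (typically launched via OpenCompass's `run.py`).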
from mmengine.config import read_base
from opencompass.models.turbomind import TurboMindModel

with read_base():
    from ..datasets.ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
    from ..datasets.ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
    from ..summarizers.example import summarizer

datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
work_dir = './outputs/llama2-chat/'

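# Meta template wrapping each user turn in Llama-2 chat's [INST] ... [/INST] markers.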
llama_chat_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
        dict(role="BOT", begin=' ', end=' ', generate=True),
    ],
)

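# Llama-2-7b-chat-hf served through LMDeploy's TurboMind engine.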
models = [
    dict(
        type=TurboMindModel,
        abbr='llama-2-7b-chat-hf-lmdeploy',
        path="Llama-2-7b-chat-hf",
        meta_template=llama_chat_meta_template,
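        # TurboMind engine and sampling settings; top_k=1 makes decoding
        # effectively greedy despite top_p/temperature.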
        engine_config=dict(session_len=4096,
                           max_batch_size=32),
        gen_config=dict(top_k=1,
                        top_p=0.8,
                        temperature=1.0,
                        max_new_tokens=100),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=1,
        concurrency=1,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='[INST]',
    )
]