# OpenCompass evaluation config: Llama-2-7b-chat served through LMDeploy's
# TurboMind inference backend, evaluated on ARC-c and ARC-e.
from mmengine.config import read_base

from opencompass.models.turbomind import TurboMindModel

with read_base():
    # Dataset and summarizer configs pulled in via OpenCompass's
    # config-inheritance mechanism (imports inside read_base() are merged
    # into this config's namespace).
    from ..datasets.ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
    from ..datasets.ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
    from ..summarizers.example import summarizer

# Flatten every imported `*_datasets` list into a single dataset list.
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])

work_dir = './outputs/llama2-chat/'

# Llama-2 chat prompt format: user turns are wrapped in [INST] ... [/INST];
# the model's reply follows and is the generated segment.
llama_chat_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
        dict(role="BOT", begin=' ', end=' ', generate=True),
    ],
)

models = [
    dict(
        type=TurboMindModel,
        abbr='llama-2-7b-chat-hf-lmdeploy',
        path="Llama-2-7b-chat-hf",
        meta_template=llama_chat_meta_template,
        # TurboMind engine settings (context window, server-side batching).
        engine_config=dict(session_len=4096, max_batch_size=32),
        # top_k=1 makes decoding effectively greedy despite top_p/temperature.
        gen_config=dict(top_k=1,
                        top_p=0.8,
                        temperature=1.0,
                        max_new_tokens=100),
        max_out_len=100,
        # NOTE(review): max_seq_len (2048) is below the engine's session_len
        # (4096) — presumably a deliberate prompt-length cap for this
        # benchmark; confirm before reusing this config elsewhere.
        max_seq_len=2048,
        batch_size=1,
        concurrency=1,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Stop generation when the model starts a new [INST] turn.
        end_str='[INST]',
    )
]