from mmengine.config import read_base

# Evaluation config: DeepSeek LLM chat models served through vLLM,
# benchmarked on SuperGLUE BoolQ.

with read_base():
    # Additional benchmarks can be re-enabled by uncommenting:
    # from ..datasets.ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
    # from ..datasets.ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
    from ..datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_gen_883d50 import BoolQ_datasets
    from ..summarizers.example import summarizer

# Collect every imported `*_datasets` list into a single flat list.
_dataset_lists = [
    value
    for name, value in locals().items()
    if name.endswith("_datasets") or name == 'datasets'
]
datasets = sum(_dataset_lists, [])

work_dir = './outputs/deepseek-llm-series/'

from opencompass.models import VLLMwithChatTemplate

# (model abbreviation, HuggingFace path, tensor-parallel GPU count)
settings = [
    ('deepseek-7b-chat-vllm', 'deepseek-ai/deepseek-llm-7b-chat', 1),
    ('deepseek-67b-chat-vllm', 'deepseek-ai/deepseek-llm-67b-chat', 4),
    ('deepseek-moe-16b-chat-vllm', 'deepseek-ai/deepseek-moe-16b-chat', 1),
]

# One vLLM model entry per setting; tensor parallelism matches the GPU count.
models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr=model_abbr,
        path=model_path,
        model_kwargs=dict(tensor_parallel_size=tp_size),
        max_out_len=1024,
        batch_size=16,
        generation_kwargs=dict(temperature=0),  # greedy decoding for reproducibility
        run_cfg=dict(num_gpus=tp_size),
    )
    for model_abbr, model_path, tp_size in settings
]