# Evaluation config for the Qwen1.5 model family served through vLLM.
# Read by mmengine's Config machinery: every non-dunder module-level name
# becomes a key of the resulting config, so keep the namespace clean.
from mmengine.config import read_base

with read_base():
    # Uncomment to add more benchmarks to the run:
    # from ..datasets.ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
    # from ..datasets.ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
    from ..datasets.ceval.ceval_gen_5f30c7 import ceval_datasets
    from ..datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_gen_883d50 import BoolQ_datasets
    from ..datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
    from ..summarizers.example import summarizer

# Flatten every imported "*_datasets" list into one dataset list.
# NOTE: the outermost iterable (locals().items()) is evaluated in the module
# scope, so this sees exactly the names imported above; the `k == 'datasets'`
# clause is a defensive no-op on first evaluation ("datasets" does not end
# with "_datasets" and is not yet bound when the comprehension runs).
datasets = sum([v for k, v in locals().items() if k.endswith("_datasets") or k == 'datasets'], [])

work_dir = './outputs/qwen1.5-series/'

from opencompass.models import VLLM

# (model abbreviation, HuggingFace model path, tensor-parallel GPU count)
settings = [
    ('qwen1.5-0.5b-vllm', 'Qwen/Qwen1.5-0.5B', 1),
    ('qwen1.5-1.8b-vllm', 'Qwen/Qwen1.5-1.8B', 1),
    ('qwen1.5-4b-vllm', 'Qwen/Qwen1.5-4B', 1),
    ('qwen1.5-7b-vllm', 'Qwen/Qwen1.5-7B', 1),
    ('qwen1.5-14b-vllm', 'Qwen/Qwen1.5-14B', 1),
    ('qwen1.5-32b-vllm', 'Qwen/Qwen1.5-32B', 2),
    ('qwen1.5-72b-vllm', 'Qwen/Qwen1.5-72B', 4),
    ('qwen1.5-110b-vllm', 'Qwen/Qwen1.5-110B', 4),
]

# Build one VLLM model config per entry. A comprehension (its own scope in
# Python 3) is used instead of a for/append loop so the loop variables do not
# leak into the module namespace and end up as spurious config keys.
models = [
    dict(
        type=VLLM,
        abbr=abbr,
        path=path,
        model_kwargs=dict(tensor_parallel_size=num_gpus),
        # add quantization="awq" or quantization="gptq" to eval quantization models
        max_out_len=100,
        max_seq_len=2048,
        batch_size=32,
        # temperature=0 -> greedy decoding for reproducible evaluation
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=num_gpus, num_procs=1),
    )
    for abbr, path, num_gpus in settings
]