Commit c289ecc0 authored by xinghao: Initial commit
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GaokaoBenchDataset
from mmengine.config import read_base

with read_base():
    from .GaokaoBench_prompts import MCQ_prompts, FBQ_prompts

# Build one dataset config per prompt entry, covering the multiple-choice
# and fill-in-the-blank splits of GAOKAO-BENCH.
_base_path = 'opencompass/GAOKAO-BENCH'
GaokaoBench_datasets = []
for folder, prompts in [
    ('Multiple-choice_Questions', MCQ_prompts),
    ('Fill-in-the-blank_Questions', FBQ_prompts),
]:
    for p in prompts:
        reader_cfg = {
            'input_columns': ['question'],
            'output_column': 'answer',
        }
        infer_cfg = {
            'prompt_template': {
                'type': PromptTemplate,
                'template': p['prefix_prompt'] + '{question}',
            },
            'retriever': {'type': ZeroRetriever},
            'inferencer': {'type': GenInferencer, 'max_out_len': 1024},
        }
        eval_cfg = {
            # Each prompt type maps to a dedicated evaluator class,
            # e.g. 'GaokaoBenchEvaluator_single_choice'.
            'evaluator': {'type': 'GaokaoBenchEvaluator' + '_' + p['type']},
            'pred_role': 'BOT',
        }
        dataset = {
            'type': GaokaoBenchDataset,
            'abbr': 'GaokaoBench_' + p['keyword'],
            'path': _base_path,
            'filename': '/' + folder + '/' + p['keyword'] + '.json',
            'name': p['keyword'],
            'reader_cfg': reader_cfg,
            'infer_cfg': infer_cfg,
            'eval_cfg': eval_cfg,
        }
        GaokaoBench_datasets.append(dataset)
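These generated configs are consumed like every other dataset list in this commit: imported under `read_base()` and handed to the runner. A minimal sketch, assuming the file above is importable as `GaokaoBench_gen` (the module path is illustrative):

```python
from mmengine.config import read_base

with read_base():
    # Module path is an assumption for illustration; adjust to this file's location.
    from .GaokaoBench_gen import GaokaoBench_datasets  # noqa: F401

datasets = [*GaokaoBench_datasets]
```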
MCQ_prompts = [
{
'type': 'single_choice',
'keyword': '2010-2022_Math_II_MCQs',
'prefix_prompt': '请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
'comment': '',
},
{
'type': 'single_choice',
'keyword': '2010-2022_Math_I_MCQs',
'prefix_prompt': '请你做一道数学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
'comment': '',
},
{
'type': 'single_choice',
'keyword': '2010-2022_History_MCQs',
'prefix_prompt': '请你做一道历史选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
},
{
'type': 'single_choice',
'keyword': '2010-2022_Biology_MCQs',
'prefix_prompt': '请你做一道生物选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
},
{
'type': 'single_choice',
'keyword': '2010-2022_Political_Science_MCQs',
'prefix_prompt': '请你做一道政治选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
},
{
'type': 'multi_choice',
'keyword': '2010-2022_Physics_MCQs',
'prefix_prompt': '请你做一道物理选择题。\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出所有符合题意的答案,并写在【答案】和<eoa>之间。\n例如:【答案】 AB <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】... <eoa>\n请你严格按照上述格式作答。\n',
},
{
'type': 'single_choice',
'keyword': '2010-2022_Chemistry_MCQs',
'prefix_prompt': '请你做一道化学选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
},
{
'type': 'single_choice',
'keyword': '2010-2013_English_MCQs',
'prefix_prompt': '请你做一道英语选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。\n题目如下:',
},
{
'type': 'multi_question_choice',
'keyword': '2010-2022_Chinese_Modern_Lit',
'prefix_prompt': '请你做一道语文阅读理解题,其中包含三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n',
},
{
'type': 'multi_question_choice',
'keyword': '2010-2022_English_Fill_in_Blanks',
'prefix_prompt': '请你做一道英语完形填空题,其中包含二十个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n',
},
{
'type': 'five_out_of_seven',
'keyword': '2012-2022_English_Cloze_Test',
'prefix_prompt': '请回答下面的问题,将符合题意的五个选项的字母写在【答案】和<eoa>之间,例如“【答案】 A B C D E <eoa>\n请严格按照上述格式作答。\n',
},
{
'type': 'multi_question_choice',
'keyword': '2010-2022_Geography_MCQs',
'prefix_prompt': '请你做一道地理选择题,其中包含两到三个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n',
},
{
'type': 'multi_question_choice',
'keyword': '2010-2022_English_Reading_Comp',
'prefix_prompt': '请你做一道英语阅读理解题,其中包含三到五个小题。\n请你一步一步思考。每一题你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:(1)【答案】 A <eoa>\n(2)【答案】 B <eoa>\n请你严格按照上述格式作答。\n',
},
{
'type': 'multi_question_choice',
'keyword': '2010-2022_Chinese_Lang_and_Usage_MCQs',
'prefix_prompt': '请你做一道语文选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】: A <eoa>\n完整的题目回答的格式如下:\n(1)【解析】 ... <eoe>\n【答案】 ... <eoa>\n(2)【解析】 ... <eoe>\n【答案】 ... <eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答\n题目如下:',
},
]
FBQ_prompts = [
{
'type': 'cloze',
'keyword': '2010-2022_Math_I_Fill-in-the-Blank',
'prefix_prompt': '请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:',
'comment': '',
},
{
'type': 'cloze',
'keyword': '2010-2022_Math_II_Fill-in-the-Blank',
'prefix_prompt': '请解答下面的数学填空题\n仔细阅读题目,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:',
'comment': '',
},
{
'type': 'cloze',
'keyword': '2010-2022_Chinese_Language_Famous_Passages_and_Sentences_Dictation',
'prefix_prompt': '请回答下面的语文填空题\n请你仔细阅读题目,先找到题目对应的中国名篇,再从名篇中找到合适的句子填写到题目的空白处。请你将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'cloze',
'keyword': '2014-2022_English_Language_Cloze_Passage',
'prefix_prompt': '请回答下面的英语短文填词题\n仔细阅读题目,空白处请填入一个适当单词或者括号内单词的正确形式。请你一步步思考,将思考过程写在【解析】和<eoe>之间,将最终答案写在【答案】和<eoa>之间。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n(2)【解析】 ...<eoe>\n【答案】...<eoa>\n请严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
]
OEQ_prompts = [
{
'type': 'subjective',
'keyword': '2010-2022_Geography_Open-ended_Questions',
'prefix_prompt': '请解答下面的地理解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。你的答案请写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chemistry_Open-ended_Questions',
'prefix_prompt': '请解答下面的化学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Math_I_Open-ended_Questions',
'prefix_prompt': '请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_History_Open-ended_Questions',
'prefix_prompt': '请解答下面的历史解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Biology_Open-ended_Questions',
'prefix_prompt': '请解答下面的生物解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,同一小题的答案用\t分隔开。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...\t...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Math_II_Open-ended_Questions',
'prefix_prompt': '请解答下面的数学解答题\n仔细阅读题目并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间,答案需要有完整的解题步骤。\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Physics_Open-ended_Questions',
'prefix_prompt': '请解答下面的物理解答题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Political_Science_Open-ended_Questions',
'prefix_prompt': '请解答下面的政治解答题\n仔细阅读材料和题目,并充分结合你已有的知识,解答其中的问题,请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的答案写在【答案】和<eoa>之间\n完整的题目回答格式如下:\n(1)【解析】 ...<eoe>\n【答案】...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'correction',
'keyword': '2012-2022_English_Language_Error_Correction',
'prefix_prompt': '请解答下面的英语短文改错题,仔细阅读题目并充分结合你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n 请你严格按照上述格式作答。\n题目如下:',
# "prefix_prompt": [
# "请解答下面的英语短文改错题,仔细阅读题目并充分结合你已有的知识,找出其中10处需要改动的地方。请你一步步思考,把修改后的短文写在【答案】和<eoa>之间。\n完整的题目回答格式如下:【答案】 ...<eoa>\n 请你严格按照上述格式作答。\n题目如下:",
# "请比较下面两篇短文,找到第二篇和第一篇的10处不同,每处不同只涉及一个单词,请将结果写在【答案】和<eoa>之间。例如:【答案】1. 将play改为plays\n 2.增加了the\n ... <eoa>\n 完整的题目回答格式如下:【答案】(1) ... \n (2) ...\n ...(10) ...\n<eoa>\n请你严格按照上述格式作答。\n短文如下:"
# ],
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chinese_Language_Ancient_Poetry_Reading',
'prefix_prompt': '请解答下面的语文古代诗歌阅读题,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n (2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chinese_Language_Practical_Text_Reading',
'prefix_prompt': '请解答下面的语文实用类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n (2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chinese_Language_Literary_Text_Reading',
'prefix_prompt': '请解答下面的语文文学类文本阅读,仔细阅读题目,注意其中可能含有单选题和多选题。请你一步步思考并将最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n (2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chinese_Language_Classical_Chinese_Reading',
'prefix_prompt': '请解答下面的语文文言文阅读,仔细阅读题目,前三题是单选题,最后一题要将文言文翻译为现代汉语。请你一步步思考并把最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。翻译题把翻译后的现代汉语句子写在【答案】后面,例如“【答案】今天天气很好 <eoa>”\n完整的题目回答格式如下:(1)【答案】 ...<eoa>\n (2)【答案】...<eoa>\n请你严格按照上述格式作答,如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
{
'type': 'subjective',
'keyword': '2010-2022_Chinese_Language_Language_and_Writing_Skills_Open-ended_Questions',
'prefix_prompt': '请解答下面的语文解答题,仔细阅读题目,注意其中可能含有选择题。请你一步步思考并将思考过程写在【解析】和<eoe>之间。请把你的最终答案写在【答案】和<eoa>之间。选择题你要从选项中选出符合题意的答案,例如“【答案】A <eoa>”。\n完整的题目回答格式如下:(1)【解析】 ...<eoe>\n【答案】 ...<eoa>\n (2)【解析】 ...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。如果不止一道题,请分别作答。\n题目如下:',
'comment': '',
},
]
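# Note: unlike MCQ_prompts and FBQ_prompts, OEQ_prompts is not referenced by the
# dataset-building loop in the config above; the run commands below use a
# "no_subjective" dataset config, consistent with the open-ended (subjective)
# questions being wired up in a separate config.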
# GaokaoBench
```bash
python3 run.py --models hf_internlm2_7b --datasets GaokaoBench_no_subjective_gen_d21e37 --debug
python3 run.py --models hf_internlm2_chat_7b --datasets GaokaoBench_no_subjective_gen_4c31db --debug
```
## Base Models
| model | GaokaoBench |
|:------------------------:|--------------:|
| llama-7b-turbomind | 14.55 |
| llama-13b-turbomind | 16.20 |
| llama-30b-turbomind | 16.14 |
| llama-65b-turbomind | 13.31 |
| llama-2-7b-turbomind | 15.02 |
| llama-2-13b-turbomind | 14.86 |
| llama-2-70b-turbomind | 16.36 |
| llama-3-8b-turbomind | 20.88 |
| llama-3-70b-turbomind | 19.98 |
| internlm2-1.8b-turbomind | 23.78 |
| internlm2-7b-turbomind | 41.41 |
| internlm2-20b-turbomind | 58.99 |
| qwen-1.8b-turbomind | 22.11 |
| qwen-7b-turbomind | 35.32 |
| qwen-14b-turbomind | 54.07 |
| qwen-72b-turbomind | 77.56 |
| qwen1.5-0.5b-hf | 30.67 |
| qwen1.5-1.8b-hf | 35.66 |
| qwen1.5-4b-hf | 54.31 |
| qwen1.5-7b-hf | 65.99 |
| qwen1.5-14b-hf | 66.60 |
| qwen1.5-32b-hf | 79.01 |
| qwen1.5-72b-hf | 80.26 |
| qwen1.5-moe-a2-7b-hf | 52.79 |
| mistral-7b-v0.1-hf | 14.35 |
| mistral-7b-v0.2-hf | 11.10 |
| mixtral-8x7b-v0.1-hf | 8.40 |
| mixtral-8x22b-v0.1-hf | 16.23 |
| yi-6b-hf | 31.70 |
| yi-34b-hf | 30.51 |
| deepseek-7b-base-hf | 17.02 |
| deepseek-67b-base-hf | 10.14 |
### Details
| model | 2010-2022_Math_II_MCQs | 2010-2022_Math_I_MCQs | 2010-2022_History_MCQs | 2010-2022_Biology_MCQs | 2010-2022_Political_Science_MCQs | 2010-2022_Physics_MCQs | 2010-2022_Chemistry_MCQs |
|:------------------------:|-------------------------:|------------------------:|-------------------------:|-------------------------:|-----------------------------------:|-------------------------:|---------------------------:|
| llama-7b-turbomind | 14.22 | 13.55 | 12.54 | 18.67 | 19.06 | 2.34 | 17.74 |
| llama-13b-turbomind | 18.81 | 15.89 | 21.25 | 22.67 | 15.62 | 1.56 | 25.81 |
| llama-30b-turbomind | 20.64 | 19.16 | 27.18 | 16.67 | 16.56 | 2.34 | 12.10 |
| llama-65b-turbomind | 21.10 | 15.89 | 11.50 | 20.00 | 5.94 | 1.56 | 21.77 |
| llama-2-7b-turbomind | 16.97 | 16.36 | 20.91 | 22.00 | 18.75 | 2.34 | 11.29 |
| llama-2-13b-turbomind | 14.68 | 11.68 | 26.13 | 16.00 | 17.81 | 2.34 | 20.97 |
| llama-2-70b-turbomind | 18.81 | 12.15 | 26.13 | 16.00 | 20.31 | 4.69 | 16.13 |
| llama-3-8b-turbomind | 4.13 | 7.94 | 37.63 | 24.67 | 26.25 | 5.47 | 21.77 |
| llama-3-70b-turbomind | 4.59 | 3.12 | 20.83 | 10.94 | 18.00 | 6.25 | 15.62 |
| internlm2-1.8b-turbomind | 20.64 | 22.90 | 39.72 | 30.00 | 25.94 | 10.94 | 31.45 |
| internlm2-7b-turbomind | 33.94 | 35.51 | 38.33 | 59.33 | 61.56 | 2.34 | 11.29 |
| internlm2-20b-turbomind | 59.17 | 51.40 | 65.16 | 74.00 | 82.19 | 28.91 | 54.03 |
| qwen-1.8b-turbomind | 29.36 | 30.84 | 19.51 | 26.00 | 22.19 | 5.47 | 27.42 |
| qwen-7b-turbomind | 22.48 | 28.04 | 45.64 | 43.33 | 62.19 | 3.91 | 33.87 |
| qwen-14b-turbomind | 54.13 | 56.25 | 82.93 | 72.00 | 85.00 | 4.69 | 65.62 |
| qwen-72b-turbomind | 73.12 | 64.49 | 91.67 | 90.62 | 58.75 | 44.53 | 79.03 |
| qwen1.5-0.5b-hf | 26.61 | 32.71 | 32.40 | 34.67 | 53.44 | 10.94 | 28.23 |
| qwen1.5-1.8b-hf | 36.24 | 33.18 | 56.45 | 36.00 | 49.38 | 6.25 | 33.06 |
| qwen1.5-4b-hf | 45.41 | 37.85 | 68.29 | 62.00 | 87.81 | 5.47 | 47.58 |
| qwen1.5-7b-hf | 56.42 | 53.74 | 85.02 | 69.33 | 86.88 | 28.12 | 70.16 |
| qwen1.5-14b-hf | 69.27 | 63.08 | 54.01 | 79.33 | 76.56 | 40.62 | 79.84 |
| qwen1.5-32b-hf | 71.10 | 61.68 | 92.68 | 93.33 | 95.94 | 45.31 | 83.06 |
| qwen1.5-72b-hf | 71.15 | 68.22 | 94.44 | 96.67 | 95.00 | 38.28 | 75.00 |
| qwen1.5-moe-a2-7b-hf | 35.32 | 29.44 | 68.64 | 44.67 | 75.00 | 17.97 | 59.68 |
| mistral-7b-v0.1-hf | 13.76 | 12.15 | 9.76 | 8.00 | 5.94 | 0.00 | 17.74 |
| mistral-7b-v0.2-hf | 6.88 | 5.61 | 10.45 | 12.00 | 4.06 | 0.78 | 14.52 |
| mixtral-8x7b-v0.1-hf | 3.67 | 1.87 | 0.35 | 0.00 | 0.00 | 0.78 | 0.81 |
| mixtral-8x22b-v0.1-hf | 16.51 | 15.89 | 1.39 | 3.33 | 9.69 | 0.00 | 13.71 |
| yi-6b-hf | 6.25 | 3.12 | 40.74 | 43.75 | 35.94 | 8.59 | 31.25 |
| yi-34b-hf | 12.50 | 4.17 | 31.11 | 5.00 | 20.62 | 2.34 | 0.89 |
| deepseek-7b-base-hf | 14.22 | 13.08 | 25.78 | 20.67 | 20.31 | 5.47 | 18.55 |
| deepseek-67b-base-hf | 3.67 | 4.21 | 8.36 | 7.33 | 4.69 | 1.56 | 4.84 |
| model | 2010-2013_English_MCQs | 2010-2022_Chinese_Modern_Lit | 2010-2022_English_Fill_in_Blanks | 2012-2022_English_Cloze_Test | 2010-2022_Geography_MCQs | 2010-2022_English_Reading_Comp | 2010-2022_Chinese_Lang_and_Usage_MCQs |
|:------------------------:|-------------------------:|-------------------------------:|-----------------------------------:|-------------------------------:|---------------------------:|---------------------------------:|----------------------------------------:|
| llama-7b-turbomind | 19.05 | 0.00 | 15.00 | 16.15 | 22.11 | 10.43 | 15.00 |
| llama-13b-turbomind | 22.86 | 0.00 | 8.50 | 8.46 | 24.21 | 9.36 | 20.00 |
| llama-30b-turbomind | 28.57 | 0.00 | 6.33 | 13.85 | 23.16 | 12.98 | 12.50 |
| llama-65b-turbomind | 21.90 | 0.00 | 8.00 | 13.85 | 16.84 | 12.34 | 10.00 |
| llama-2-7b-turbomind | 20.95 | 0.00 | 6.17 | 12.31 | 22.11 | 11.28 | 11.25 |
| llama-2-13b-turbomind | 16.19 | 0.00 | 9.83 | 13.08 | 22.11 | 7.66 | 10.00 |
| llama-2-70b-turbomind | 31.43 | 0.00 | 4.17 | 13.08 | 25.26 | 20.43 | 7.50 |
| llama-3-8b-turbomind | 1.90 | 1.15 | 42.00 | 7.69 | 29.47 | 17.66 | 17.50 |
| llama-3-70b-turbomind | 18.75 | 3.45 | 53.67 | 76.15 | 18.60 | 36.76 | 8.75 |
| internlm2-1.8b-turbomind | 33.33 | 3.45 | 15.67 | 13.85 | 32.63 | 10.43 | 25.00 |
| internlm2-7b-turbomind | 61.90 | 20.69 | 57.33 | 20.77 | 61.05 | 40.21 | 47.50 |
| internlm2-20b-turbomind | 72.38 | 37.93 | 62.33 | 19.23 | 74.74 | 38.51 | 48.75 |
| qwen-1.8b-turbomind | 47.62 | 9.20 | 13.50 | 12.31 | 25.26 | 16.38 | 21.25 |
| qwen-7b-turbomind | 42.86 | 12.64 | 35.83 | 26.15 | 51.58 | 17.87 | 30.00 |
| qwen-14b-turbomind | 89.58 | 3.45 | 5.00 | 23.85 | 93.02 | 21.10 | 40.62 |
| qwen-72b-turbomind | 71.43 | 81.25 | 88.17 | 96.25 | 95.79 | 79.57 | 90.00 |
| qwen1.5-0.5b-hf | 40.95 | 22.99 | 21.67 | 21.54 | 38.95 | 17.02 | 22.50 |
| qwen1.5-1.8b-hf | 85.71 | 29.89 | 22.17 | 30.00 | 34.74 | 20.43 | 27.50 |
| qwen1.5-4b-hf | 88.57 | 35.63 | 41.00 | 67.69 | 64.21 | 41.28 | 68.75 |
| qwen1.5-7b-hf | 93.33 | 14.94 | 59.33 | 70.00 | 61.05 | 67.87 | 61.25 |
| qwen1.5-14b-hf | 94.29 | 16.09 | 59.67 | 76.92 | 90.53 | 59.57 | 77.50 |
| qwen1.5-32b-hf | 94.29 | 43.68 | 82.83 | 38.46 | 97.89 | 75.96 | 67.50 |
| qwen1.5-72b-hf | 99.05 | 28.74 | 85.62 | 77.69 | 94.74 | 72.77 | 87.50 |
| qwen1.5-moe-a2-7b-hf | 65.71 | 36.78 | 51.67 | 75.38 | 72.63 | 61.28 | 33.75 |
| mistral-7b-v0.1-hf | 17.14 | 8.05 | 28.33 | 6.92 | 24.21 | 30.43 | 12.50 |
| mistral-7b-v0.2-hf | 7.62 | 9.20 | 23.17 | 6.15 | 25.26 | 19.15 | 7.50 |
| mixtral-8x7b-v0.1-hf | 0.00 | 4.60 | 33.83 | 10.77 | 37.89 | 25.96 | 3.75 |
| mixtral-8x22b-v0.1-hf | 7.62 | 4.17 | 51.33 | 14.62 | 53.68 | 21.91 | 10.00 |
| yi-6b-hf | 17.14 | 52.87 | 50.83 | 36.25 | 36.84 | 48.09 | 36.25 |
| yi-34b-hf | 0.00 | 59.77 | 76.67 | 86.92 | 67.44 | 61.06 | 81.25 |
| deepseek-7b-base-hf | 20.95 | 2.30 | 17.83 | 12.31 | 25.26 | 12.55 | 8.75 |
| deepseek-67b-base-hf | 1.90 | 9.20 | 27.33 | 30.00 | 40.00 | 13.19 | 3.75 |
## Chat Models
| model | GaokaoBench |
|:-----------------------------:|--------------:|
| qwen1.5-0.5b-chat-hf | 21.51 |
| qwen1.5-1.8b-chat-hf | 46.19 |
| qwen1.5-4b-chat-hf | 59.11 |
| qwen1.5-7b-chat-hf | 70.55 |
| qwen1.5-14b-chat-hf | 80.39 |
| qwen1.5-32b-chat-hf | 86.15 |
| qwen1.5-72b-chat-hf | 88.58 |
| qwen1.5-110b-chat-hf | 89.59 |
| internlm2-chat-1.8b-hf | 29.73 |
| internlm2-chat-1.8b-sft-hf | 28.79 |
| internlm2-chat-7b-hf | 54.54 |
| internlm2-chat-7b-sft-hf | 55.39 |
| internlm2-chat-20b-hf | 57.95 |
| internlm2-chat-20b-sft-hf | 57.62 |
| llama-3-8b-instruct-hf | 45.48 |
| llama-3-70b-instruct-hf | 65.91 |
| llama-3-8b-instruct-lmdeploy | 44.48 |
| llama-3-70b-instruct-lmdeploy | 67.06 |
| mistral-7b-instruct-v0.1-hf | 26.21 |
| mistral-7b-instruct-v0.2-hf | 32.17 |
| mixtral-8x7b-instruct-v0.1-hf | 42.46 |
### Details
| model | 2010-2022_Math_II_MCQs | 2010-2022_Math_I_MCQs | 2010-2022_History_MCQs | 2010-2022_Biology_MCQs | 2010-2022_Political_Science_MCQs | 2010-2022_Physics_MCQs | 2010-2022_Chemistry_MCQs |
|:-----------------------------:|-------------------------:|------------------------:|-------------------------:|-------------------------:|-----------------------------------:|-------------------------:|---------------------------:|
| qwen1.5-0.5b-chat-hf | 25.23 | 25.70 | 39.02 | 24.67 | 25.00 | 0.78 | 25.00 |
| qwen1.5-1.8b-chat-hf | 30.28 | 26.64 | 61.32 | 55.33 | 77.81 | 11.72 | 40.32 |
| qwen1.5-4b-chat-hf | 38.53 | 35.05 | 70.73 | 70.00 | 83.44 | 25.00 | 41.13 |
| qwen1.5-7b-chat-hf | 49.54 | 39.72 | 81.88 | 82.67 | 90.62 | 46.88 | 61.29 |
| qwen1.5-14b-chat-hf | 64.68 | 54.21 | 87.80 | 90.67 | 94.69 | 44.53 | 69.35 |
| qwen1.5-32b-chat-hf | 70.92 | 66.14 | 98.02 | 97.74 | 96.07 | 57.81 | 72.92 |
| qwen1.5-72b-chat-hf | 76.61 | 68.22 | 95.47 | 96.00 | 97.19 | 64.06 | 86.29 |
| qwen1.5-110b-chat-hf | 80.36 | 66.67 | 100.00 | 100.00 | 96.25 | 65.62 | 75.00 |
| internlm2-chat-1.8b-hf | 28.44 | 28.50 | 46.69 | 39.33 | 44.38 | 10.16 | 26.61 |
| internlm2-chat-1.8b-sft-hf | 23.85 | 20.09 | 55.75 | 40.67 | 53.12 | 14.84 | 30.65 |
| internlm2-chat-7b-hf | 45.87 | 42.52 | 77.70 | 75.33 | 76.56 | 16.41 | 38.71 |
| internlm2-chat-7b-sft-hf | 49.08 | 39.72 | 80.84 | 68.67 | 81.25 | 29.69 | 42.74 |
| internlm2-chat-20b-hf | 53.21 | 46.73 | 80.49 | 74.00 | 85.00 | 31.25 | 37.10 |
| internlm2-chat-20b-sft-hf | 51.83 | 47.20 | 86.06 | 78.00 | 88.12 | 35.16 | 45.16 |
| llama-3-8b-instruct-hf | 37.16 | 31.31 | 60.98 | 48.67 | 51.25 | 11.72 | 39.52 |
| llama-3-70b-instruct-hf | 58.26 | 52.34 | 63.76 | 75.33 | 75.31 | 36.72 | 53.23 |
| llama-3-8b-instruct-lmdeploy | 37.61 | 35.51 | 55.05 | 53.33 | 52.19 | 7.81 | 34.68 |
| llama-3-70b-instruct-lmdeploy | 75.00 | 55.56 | 61.11 | 73.68 | 70.00 | 40.62 | 43.75 |
| mistral-7b-instruct-v0.1-hf | 23.39 | 21.03 | 35.19 | 18.00 | 26.56 | 5.47 | 30.65 |
| mistral-7b-instruct-v0.2-hf | 31.19 | 19.63 | 38.33 | 40.00 | 35.94 | 20.31 | 34.68 |
| mixtral-8x7b-instruct-v0.1-hf | 41.28 | 37.85 | 52.26 | 47.33 | 50.00 | 25.78 | 43.55 |
| model | 2010-2013_English_MCQs | 2010-2022_Chinese_Modern_Lit | 2010-2022_English_Fill_in_Blanks | 2012-2022_English_Cloze_Test | 2010-2022_Geography_MCQs | 2010-2022_English_Reading_Comp | 2010-2022_Chinese_Lang_and_Usage_MCQs |
|:-----------------------------:|-------------------------:|-------------------------------:|-----------------------------------:|-------------------------------:|---------------------------:|---------------------------------:|----------------------------------------:|
| qwen1.5-0.5b-chat-hf | 32.38 | 10.34 | 0.00 | 2.31 | 27.37 | 15.11 | 18.75 |
| qwen1.5-1.8b-chat-hf | 69.52 | 42.53 | 56.33 | 2.31 | 61.05 | 32.98 | 35.00 |
| qwen1.5-4b-chat-hf | 70.48 | 58.62 | 82.33 | 16.15 | 68.42 | 68.51 | 47.50 |
| qwen1.5-7b-chat-hf | 83.81 | 71.26 | 85.17 | 57.69 | 81.05 | 78.94 | 66.25 |
| qwen1.5-14b-chat-hf | 93.33 | 78.16 | 97.17 | 71.54 | 91.58 | 94.26 | 81.25 |
| qwen1.5-32b-chat-hf | 100.00 | 81.61 | 95.83 | 90.00 | 97.89 | 92.43 | 92.86 |
| qwen1.5-72b-chat-hf | 98.10 | 83.91 | 98.00 | 90.77 | 94.74 | 96.38 | 96.25 |
| qwen1.5-110b-chat-hf | 100.00 | 91.95 | 98.50 | 97.69 | 95.35 | 98.44 | 100.00 |
| internlm2-chat-1.8b-hf | 38.10 | 6.90 | 0.67 | 1.54 | 56.84 | 23.19 | 30.00 |
| internlm2-chat-1.8b-sft-hf | 50.48 | 0.00 | 0.00 | 0.00 | 27.37 | 11.91 | 32.50 |
| internlm2-chat-7b-hf | 60.95 | 67.82 | 7.00 | 7.69 | 70.53 | 79.79 | 38.75 |
| internlm2-chat-7b-sft-hf | 60.00 | 71.26 | 6.50 | 0.77 | 68.42 | 77.02 | 42.50 |
| internlm2-chat-20b-hf | 60.95 | 43.68 | 34.83 | 4.62 | 71.58 | 62.55 | 43.75 |
| internlm2-chat-20b-sft-hf | 75.24 | 47.13 | 1.00 | 2.31 | 80.00 | 65.96 | 37.50 |
| llama-3-8b-instruct-hf | 50.48 | 36.78 | 30.83 | 21.54 | 57.89 | 81.70 | 28.75 |
| llama-3-70b-instruct-hf | 73.33 | 59.77 | 82.83 | 24.62 | 73.68 | 91.28 | 45.00 |
| llama-3-8b-instruct-lmdeploy | 52.38 | 42.53 | 21.33 | 18.46 | 58.95 | 81.28 | 26.25 |
| llama-3-70b-instruct-lmdeploy | 87.50 | 62.07 | 84.38 | 26.92 | 72.63 | 91.20 | 56.25 |
| mistral-7b-instruct-v0.1-hf | 38.10 | 18.39 | 30.50 | 6.15 | 31.58 | 38.72 | 18.75 |
| mistral-7b-instruct-v0.2-hf | 41.90 | 31.03 | 28.00 | 20.77 | 29.47 | 42.13 | 15.00 |
| mixtral-8x7b-instruct-v0.1-hf | 49.52 | 39.08 | 41.33 | 9.23 | 44.21 | 43.19 | 21.25 |
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import HLEDataset

# ----------------------------- Detailed Config -----------------------------
math_reader_cfg = dict(input_columns=['problem'], output_column='answer')

math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Evaluation configuration
math_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt=GRADER_TEMPLATE,
                    ),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=HLEDataset,
            path='cais/hle',
            reader_cfg=math_reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
    pred_role='BOT',
)

hle_datasets = [
    dict(
        type=HLEDataset,
        abbr='hle_llmjudge',
        path='cais/hle',
        category='Biology/Medicine',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]
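The grader is constrained to reply with a single letter, which `dict_postprocessor` then converts into a score. The real logic lives in opencompass's `generic_llmjudge_postprocess`; the following is only a hedged sketch of the letter-to-verdict step such a postprocessor has to perform, with illustrative names:

```python
import re

def parse_judge_letter(judge_output: str):
    """Illustrative only, not the actual generic_llmjudge_postprocess:
    map a grader reply such as 'A' or 'B' to a correctness verdict."""
    m = re.search(r'\b([AB])\b', judge_output.strip())
    if m is None:
        return None           # judge reply did not contain a grade letter
    return m.group(1) == 'A'  # A: CORRECT, B: INCORRECT
```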
from mmengine.config import read_base

with read_base():
    # By default, use an LLM as the judge
    from .hle_llmverify_gen_6ff468 import hle_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import HLEDataset

# ----------------------------- Detailed Config -----------------------------
math_reader_cfg = dict(input_columns=['problem'], output_column='answer')

math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. THE STANDARD ANSWER IS ALWAYS CORRECT AND THE QUESTION IS PERFECTLY VALID. NEVER QUESTION THEM.
2. ONLY compare the FINAL ANSWER - COMPLETELY IGNORE any potential errors in the REASONING PROCESSES.
3. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. Before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct.
4. Some answers may consist of multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. Regardless of the question type, the final answer will be considered correct as long as it matches the standard answer, regardless of whether the reasoning process is correct. For multiple-select questions and multi-blank fill-in-the-blank questions, all corresponding options or blanks must be answered correctly and match the standard answer exactly to be deemed correct.
5. If the prediction is given with \\boxed{{}}, please ignore the \\boxed{{}} and only judge whether the candidate's answer is consistent with the standard answer.
6. If the candidate's answer is invalid (e.g., incomplete (cut off mid-response), lots of unnormal repetitive content, or irrelevant to the question, saying it can't answer the question because some irresistible factors, like ethical issues, no enough information, etc.), select option C (INVALID).
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
C: INVALID
Just return the letters "A", "B", or "C", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT, or INVALID. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>:
{problem}
<Original Question End>
<Standard Answer Begin>:
{answer}
<Standard Answer End>
<Candidate's Answer Begin>:
{prediction}
<Candidate's Answer End>
Judging the correctness of the candidate's answer:
""".strip()
# Evaluation configuration
math_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt=GRADER_TEMPLATE,
                    ),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=HLEDataset,
            path='cais/hle',
            reader_cfg=math_reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
    pred_role='BOT',
)

hle_datasets = [
    dict(
        type=HLEDataset,
        abbr='hle_llmjudge',
        path='cais/hle',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import HLEDataset

# ----------------------------- Detailed Config -----------------------------
math_reader_cfg = dict(input_columns=['problem'], output_column='answer')

math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Evaluation configuration
math_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt=GRADER_TEMPLATE,
                    ),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=HLEDataset,
            path='cais/hle',
            reader_cfg=math_reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
    pred_role='BOT',
)

hle_datasets = [
    dict(
        type=HLEDataset,
        abbr='hle_llmjudge',
        path='cais/hle',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]
from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

reader_cfg = dict(
    input_columns=[
        'prompt_trans',
    ],
    output_column='prompt_id',  # not used by the evaluator
)

infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt_trans}',  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=ChatInferencer),
)

# Dataset configurations: the full set plus the 'hard' and 'consensus' subsets
healthbench_dataset = dict(
    type=HealthBenchDataset,
    abbr='healthbench',
    path='huihuixu/healthbench',
    subset='',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=dict(
        evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name=''),
        pred_role='BOT',
    ),
)

healthbench_hard_dataset = dict(
    type=HealthBenchDataset,
    abbr='healthbench_hard',
    path='huihuixu/healthbench',
    subset='hard',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=dict(
        evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='hard'),
        pred_role='BOT',
    ),
)

healthbench_consensus_dataset = dict(
    type=HealthBenchDataset,
    abbr='healthbench_consensus',
    path='huihuixu/healthbench',
    subset='consensus',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=dict(
        evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='consensus'),
        pred_role='BOT',
    ),
)

healthbench_datasets = [healthbench_dataset, healthbench_hard_dataset, healthbench_consensus_dataset]
# IFEval
## Introduction
The following introduction comes from the abstract of [Instruction-Following Evaluation for Large Language Models](https://arxiv.org/abs/2311.07911):
```
IFEval is a straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set of "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times". We identified 25 types of those verifiable instructions and constructed around 500 prompts, with each prompt containing one or more verifiable instructions.
```
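The "verifiable" part means every instruction type comes with a deterministic programmatic check, so grading needs no human or LLM judge. A minimal sketch of the idea using two of the instruction types quoted above (function names are illustrative, not from the official instruction_following_eval code):

```python
# Each verifiable instruction is a deterministic predicate over the response.
def check_min_words(response: str, n: int = 400) -> bool:
    return len(response.split()) >= n

def check_keyword_count(response: str, keyword: str = 'AI', times: int = 3) -> bool:
    return response.lower().count(keyword.lower()) >= times

response = 'AI can summarize long documents. ' * 100
checks = [check_min_words(response), check_keyword_count(response)]
# Strict prompt-level accuracy requires every instruction in the prompt to pass.
print(all(checks))
```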
## Official link
### Paper
[Instruction-Following Evaluation for Large Language Models](https://arxiv.org/abs/2311.07911)
### Repository
[instruction_following_eval](https://github.com/google-research/google-research/tree/master/instruction_following_eval)
## Examples
Input example I:
```
Write a 300+ word summary of the wikipedia page \"https://en.wikipedia.org/wiki/Raymond_III,_Count_of_Tripoli\". Do not use any commas and highlight at least 3 sections that has titles in markdown format, for example *highlighted section part 1*, *highlighted section part 2*, *highlighted section part 3*.
```
Output example I (from GPT-4):
```
Raymond III was the Count of Tripoli from 1152 to 1187 and Prince of Galilee and Tiberias in the Kingdom of Jerusalem. He was born in 1140 to Raymond II of Tripoli and Hodierna of Jerusalem. His mother was the daughter of Baldwin II of Jerusalem. \n\n*Early Life and Succession*\n\nRaymond III was only a child when his father was murdered. His mother Hodierna was regent until Raymond came of age. In 1155 Raymond married Eschiva the daughter of Walter I of Beirut. They had three children: Raymond IV Bertrand and a daughter who married Guy of Lusignan. \n\n*Reign*\n\nRaymond III's reign was marked by a series of conflicts and alliances with the Muslim world. He was involved in the defense of the Kingdom of Jerusalem against Nur ad-Din and later Saladin. He was also a key figure in the internal politics of the kingdom. He was a supporter of the queen mother Amalric of Jerusalem and opposed the succession of Guy of Lusignan. \n\n*Later Life and Death*\n\nIn 1187 Raymond III was part of the disastrous Battle of Hattin where the Christian forces were defeated by Saladin. He was one of the few to escape the battlefield but died later that year. His son Raymond IV succeeded him as Count of Tripoli. \n\nRaymond III's life and reign were marked by the complex politics and warfare of the Crusader states. He was a key figure in the defense of the Kingdom of Jerusalem and his death marked a significant loss for the Christian forces in the region. His legacy is a testament to the turbulent times in which he lived and the challenges faced by the Crusader states in their relations with the Muslim world.
```
Input example II:
```
"I am planning a trip to Japan, and I would like thee to write an itinerary for my journey in a Shakespearean style. You are not allowed to use any commas in your response."
```
Output example II (from GPT-4):
```
Hark! Hearken to the tale of thy journey to the land of the rising sun, Japan. \n\nUpon the morrow of thy arrival, thou shalt find thyself in Tokyo, the city of lights and towers. In the morn, thou shalt explore the grandeur of the Imperial Palace, a sight to behold. As the sun reaches its zenith, thou shalt partake in a feast of sushi, a delicacy of the land. As the day wanes, thou shalt wander through the electric streets of Akihabara, a realm of technology and animation.\n\nOn the second day, thou shalt journey to the ancient city of Kyoto. In the soft light of dawn, thou shalt visit the golden pavilion of Kinkaku-ji, a vision of beauty and tranquility. As the day unfolds, thou shalt wander through the bamboo grove of Arashiyama, a place of peace and serenity. As the sun sets, thou shalt partake in a traditional tea ceremony, a ritual of grace and harmony.\n\nOn the third day, thou shalt travel to the city of Hiroshima. In the morning, thou shalt pay thy respects at the Peace Memorial Park, a place of remembrance and hope. As the day progresses, thou shalt journey to the island of Miyajima, home to the floating torii gate, a sight of wonder and awe. As the day ends, thou shalt feast on okonomiyaki, a dish of delight and satisfaction.\n\nOn the fourth day, thou shalt return to Tokyo. In the morning, thou shalt ascend the Tokyo Skytree, a tower of height and grandeur. As the day unfolds, thou shalt explore the bustling streets of Shibuya, a realm of fashion and culture. As the sun sets, thou shalt dine in the vibrant district of Roppongi, a place of gastronomy and nightlife.\n\nOn the fifth and final day, thou shalt bid farewell to the land of the rising sun. With memories of beauty and wonder, thou shalt embark on thy journey home, carrying with thee the spirit of Japan. \n\nThus ends the tale of thy journey, a tale of exploration and discovery. May thy travels be filled with joy and adventure, and may the land of the rising sun forever hold a place in thy heart.
```
## Evaluation results

Prompt-level accuracy counts a prompt as passed only when every instruction in it is followed, while Inst-level accuracy scores each instruction independently; the loose variants rerun the checks on lightly transformed responses (e.g., with markdown emphasis or the first/last line stripped).
```
dataset version metric mode baichuan2-7b-chat-hf baichuan2-13b-chat-hf internlm2-chat-7b-hf internlm2-chat-20b-hf llama-2-7b-chat-hf llama-2-13b-chat-hf
--------- --------- ---------------------------- ------ ---------------------- ----------------------- ---------------------- ----------------------- -------------------- ---------------------
IFEval 3321a3 Prompt-level-strict-accuracy gen 36.04 35.49 38.26 33.09 33.46 33.64
IFEval 3321a3 Inst-level-strict-accuracy gen 46.76 46.76 49.16 45.32 45.68 45.44
IFEval 3321a3 Prompt-level-loose-accuracy gen 37.52 37.71 42.51 39.37 43.81 47.32
IFEval 3321a3 Inst-level-loose-accuracy gen 48.44 49.16 53.72 51.08 55.64 58.03
```
## Reference
```
@article{zhou2023instruction,
title={Instruction-Following Evaluation for Large Language Models},
author={Zhou, Jeffrey and Lu, Tianjian and Mishra, Swaroop and Brahma, Siddhartha and Basu, Sujoy and Luan, Yi and Zhou, Denny and Hou, Le},
journal={arXiv preprint arXiv:2311.07911},
year={2023}
}
```
from mmengine.config import read_base

with read_base():
    from .IFEval_gen_353ae7 import ifeval_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import IFEvalDataset, IFEvaluator

ifeval_reader_cfg = dict(
    input_columns=['prompt'], output_column='reference')

ifeval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1025))

ifeval_eval_cfg = dict(
    evaluator=dict(type=IFEvaluator),
    pred_role='BOT',
)

ifeval_datasets = [
    dict(
        abbr='IFEval',
        type=IFEvalDataset,
        path='data/ifeval/input_data.jsonl',
        reader_cfg=ifeval_reader_cfg,
        infer_cfg=ifeval_infer_cfg,
        eval_cfg=ifeval_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import IFEvalDataset, IFEvaluator

ifeval_reader_cfg = dict(
    input_columns=['prompt'], output_column='reference')

ifeval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

ifeval_eval_cfg = dict(
    evaluator=dict(type=IFEvaluator),
    pred_role='BOT',
)

ifeval_datasets = [
    dict(
        abbr='IFEval',
        type=IFEvalDataset,
        path='data/ifeval/input_data.jsonl',
        reader_cfg=ifeval_reader_cfg,
        infer_cfg=ifeval_infer_cfg,
        eval_cfg=ifeval_eval_cfg)
]
# IFEval
```bash
python3 run.py --models hf_internlm2_chat_7b --datasets IFEval_gen_3321a3 --debug
```
## Chat Models
| model | Prompt-level-strict-accuracy | Inst-level-strict-accuracy | Prompt-level-loose-accuracy | Inst-level-loose-accuracy |
|:-----------------------------:|-------------------------------:|-----------------------------:|------------------------------:|----------------------------:|
| qwen1.5-0.5b-chat-hf | 13.12 | 23.26 | 15.71 | 26.38 |
| qwen1.5-1.8b-chat-hf | 16.08 | 26.26 | 18.30 | 29.02 |
| qwen1.5-4b-chat-hf | 25.51 | 35.97 | 28.84 | 39.81 |
| qwen1.5-7b-chat-hf | 38.82 | 50.00 | 42.70 | 53.48 |
| qwen1.5-14b-chat-hf | 42.51 | 54.20 | 49.17 | 59.95 |
| qwen1.5-32b-chat-hf | 49.54 | 60.43 | 53.97 | 64.39 |
| qwen1.5-72b-chat-hf | 51.02 | 61.99 | 57.12 | 67.27 |
| qwen1.5-110b-chat-hf | 55.08 | 65.59 | 61.18 | 70.86 |
| internlm2-chat-1.8b-hf | 18.30 | 28.78 | 21.44 | 32.01 |
| internlm2-chat-1.8b-sft-hf | 18.67 | 31.18 | 19.78 | 32.85 |
| internlm2-chat-7b-hf | 34.75 | 46.28 | 40.48 | 51.44 |
| internlm2-chat-7b-sft-hf | 39.19 | 50.12 | 42.33 | 52.76 |
| internlm2-chat-20b-hf | 36.41 | 48.68 | 40.67 | 53.24 |
| internlm2-chat-20b-sft-hf | 44.55 | 55.64 | 46.77 | 58.03 |
| llama-3-8b-instruct-hf | 68.02 | 76.74 | 75.42 | 82.85 |
| llama-3-70b-instruct-hf | 78.00 | 84.65 | 84.29 | 89.21 |
| llama-3-8b-instruct-lmdeploy | 69.13 | 77.46 | 77.26 | 83.93 |
| llama-3-70b-instruct-lmdeploy | 75.97 | 82.97 | 83.18 | 88.37 |
| mistral-7b-instruct-v0.1-hf | 40.30 | 50.96 | 41.96 | 53.48 |
| mistral-7b-instruct-v0.2-hf | 49.17 | 60.43 | 51.94 | 64.03 |
| mixtral-8x7b-instruct-v0.1-hf | 50.09 | 60.67 | 55.64 | 65.83 |
# LCBench2023
LCBench2023 collects questions from LeetCode weekly contests held between 2022 and 2023. It comes in Chinese and English versions, each with 581 questions.
## Base Models
| model | lcbench/pass@1 | en/pass@1 | cn/pass@1 | lcbench/pass | lcbench/timeout | lcbench/failed | lcbench/wrong_answer | en/pass | en/timeout | en/failed | en/wrong_answer | cn/pass | cn/timeout | cn/failed | cn/wrong_answer |
|:------------------------:|-----------------:|------------:|------------:|---------------:|------------------:|-----------------:|-----------------------:|----------:|-------------:|------------:|------------------:|----------:|-------------:|------------:|------------------:|
| llama-7b-turbomind | 1.30 | 2.61 | 0.00 | 15 | 28 | 843 | 266 | 15 | 14 | 290 | 257 | 0 | 14 | 553 | 9 |
| llama-13b-turbomind | 2.09 | 4.17 | 0.00 | 24 | 31 | 823 | 274 | 24 | 16 | 270 | 266 | 0 | 15 | 553 | 8 |
| llama-30b-turbomind | 3.48 | 6.78 | 0.17 | 40 | 41 | 780 | 291 | 39 | 25 | 226 | 286 | 1 | 16 | 554 | 5 |
| llama-65b-turbomind | 4.00 | 7.83 | 0.17 | 46 | 22 | 755 | 329 | 45 | 10 | 205 | 316 | 1 | 12 | 550 | 13 |
| llama-2-7b-turbomind | 0.78 | 1.57 | 0.00 | 9 | 28 | 825 | 290 | 9 | 16 | 274 | 277 | 0 | 12 | 551 | 13 |
| llama-2-13b-turbomind | 2.52 | 5.04 | 0.00 | 29 | 29 | 761 | 333 | 29 | 17 | 207 | 323 | 0 | 12 | 554 | 10 |
| llama-2-70b-turbomind | 5.04 | 9.57 | 0.52 | 58 | 47 | 684 | 363 | 55 | 28 | 140 | 353 | 3 | 19 | 544 | 10 |
| llama-3-8b-turbomind | 16.59 | 16.70 | 16.49 | 191 | 30 | 236 | 695 | 96 | 13 | 119 | 348 | 95 | 17 | 117 | 347 |
| llama-3-70b-turbomind | 38.49 | 38.43 | 38.54 | 443 | 2 | 120 | 587 | 221 | 2 | 58 | 295 | 222 | 0 | 62 | 292 |
| internlm2-1.8b-turbomind | 4.34 | 5.04 | 3.65 | 50 | 33 | 333 | 736 | 29 | 18 | 177 | 352 | 21 | 15 | 156 | 384 |
| internlm2-7b-turbomind | 12.16 | 12.52 | 11.81 | 140 | 41 | 166 | 805 | 72 | 23 | 92 | 389 | 68 | 18 | 74 | 416 |
| internlm2-20b-turbomind | 18.46 | 20.96 | 15.97 | 213 | 54 | 134 | 751 | 121 | 24 | 57 | 374 | 92 | 30 | 77 | 377 |
| qwen-1.8b-turbomind | 1.82 | 1.91 | 1.74 | 21 | 31 | 449 | 651 | 11 | 17 | 208 | 340 | 10 | 14 | 241 | 311 |
| qwen-7b-turbomind | 4.95 | 5.39 | 4.51 | 57 | 37 | 388 | 670 | 31 | 15 | 197 | 333 | 26 | 22 | 191 | 337 |
| qwen-14b-turbomind | 8.86 | 9.74 | 7.99 | 102 | 2 | 245 | 803 | 56 | 0 | 120 | 400 | 46 | 2 | 125 | 403 |
| qwen-72b-turbomind | 16.86 | 19.48 | 14.24 | 194 | 12 | 229 | 717 | 112 | 4 | 112 | 348 | 82 | 8 | 117 | 369 |
| qwen1.5-0.5b-hf | 0.87 | 0.52 | 1.22 | 10 | 29 | 499 | 614 | 3 | 10 | 259 | 304 | 7 | 19 | 240 | 310 |
| qwen1.5-1.8b-hf | 2.00 | 2.26 | 1.74 | 23 | 26 | 434 | 669 | 13 | 10 | 220 | 333 | 10 | 16 | 214 | 336 |
| qwen1.5-4b-hf | 5.65 | 6.96 | 4.34 | 65 | 37 | 349 | 701 | 40 | 19 | 161 | 356 | 25 | 18 | 188 | 345 |
| qwen1.5-7b-hf | 6.69 | 8.00 | 5.38 | 77 | 30 | 283 | 762 | 46 | 12 | 124 | 394 | 31 | 18 | 159 | 368 |
| qwen1.5-14b-hf | 12.69 | 13.74 | 11.63 | 146 | 43 | 232 | 731 | 79 | 22 | 122 | 353 | 67 | 21 | 110 | 378 |
| qwen1.5-32b-hf | 14.34 | 16.70 | 11.98 | 165 | 45 | 191 | 751 | 96 | 18 | 88 | 374 | 69 | 27 | 103 | 377 |
| qwen1.5-72b-hf | 15.29 | 15.65 | 14.93 | 176 | 11 | 242 | 723 | 90 | 7 | 118 | 361 | 86 | 4 | 124 | 362 |
| qwen1.5-moe-a2-7b-hf | 9.56 | 10.09 | 9.03 | 110 | 10 | 272 | 760 | 58 | 5 | 129 | 384 | 52 | 5 | 143 | 376 |
| mistral-7b-v0.1-hf | 11.38 | 11.83 | 10.94 | 131 | 30 | 221 | 770 | 68 | 11 | 100 | 397 | 63 | 19 | 121 | 373 |
| mistral-7b-v0.2-hf | 11.38 | 11.13 | 11.63 | 131 | 2 | 259 | 760 | 64 | 2 | 124 | 386 | 67 | 0 | 135 | 374 |
| mixtral-8x7b-v0.1-hf | 21.11 | 21.39 | 20.83 | 243 | 7 | 165 | 737 | 123 | 4 | 76 | 373 | 120 | 3 | 89 | 364 |
| mixtral-8x22b-v0.1-hf | 30.97 | 31.22 | 30.73 | 357 | 6 | 131 | 658 | 180 | 3 | 66 | 327 | 177 | 3 | 65 | 331 |
| yi-6b-hf | 2.43 | 2.78 | 2.08 | 28 | 7 | 456 | 661 | 16 | 2 | 214 | 344 | 12 | 5 | 242 | 317 |
| yi-34b-hf | 8.25 | 8.35 | 8.16 | 95 | 8 | 319 | 730 | 48 | 5 | 163 | 360 | 47 | 3 | 156 | 370 |
| deepseek-7b-base-hf | 5.30 | 5.22 | 5.38 | 61 | 7 | 325 | 759 | 30 | 4 | 165 | 377 | 31 | 3 | 160 | 382 |
| deepseek-67b-base-hf | 26.50 | 26.96 | 26.04 | 305 | 9 | 202 | 636 | 155 | 4 | 105 | 312 | 150 | 5 | 97 | 324 |
## Chat Models
| model | lcbench/pass@1 | en/pass@1 | cn/pass@1 | lcbench/pass | lcbench/timeout | lcbench/failed | lcbench/wrong_answer | en/pass | en/timeout | en/failed | en/wrong_answer | cn/pass | cn/timeout | cn/failed | cn/wrong_answer |
|:-----------------------------:|-----------------:|------------:|------------:|---------------:|------------------:|-----------------:|-----------------------:|----------:|-------------:|------------:|------------------:|----------:|-------------:|------------:|------------------:|
| qwen1.5-0.5b-chat-hf | 0.00 | 0.00 | 0.00 | 0 | 0 | 1152 | 0 | 0 | 0 | 576 | 0 | 0 | 0 | 576 | 0 |
| qwen1.5-1.8b-chat-hf | 1.65 | 1.57 | 1.74 | 19 | 5 | 603 | 525 | 9 | 2 | 298 | 267 | 10 | 3 | 305 | 258 |
| qwen1.5-4b-chat-hf | 5.56 | 5.22 | 5.90 | 64 | 17 | 484 | 587 | 30 | 8 | 242 | 296 | 34 | 9 | 242 | 291 |
| qwen1.5-7b-chat-hf | 8.78 | 9.57 | 7.99 | 101 | 25 | 333 | 693 | 55 | 12 | 151 | 358 | 46 | 13 | 182 | 335 |
| qwen1.5-14b-chat-hf | 14.42 | 16.52 | 12.33 | 166 | 18 | 222 | 746 | 95 | 10 | 110 | 361 | 71 | 8 | 112 | 385 |
| qwen1.5-32b-chat-hf | 10.78 | 13.04 | 8.51 | 124 | 15 | 516 | 497 | 75 | 10 | 195 | 296 | 49 | 5 | 321 | 201 |
| qwen1.5-72b-chat-hf | 18.77 | 18.78 | 18.75 | 216 | 23 | 164 | 749 | 108 | 12 | 89 | 367 | 108 | 11 | 75 | 382 |
| qwen1.5-110b-chat-hf | 34.58 | 34.43 | 34.72 | 399 | 20 | 176 | 557 | 199 | 12 | 85 | 280 | 200 | 8 | 91 | 277 |
| internlm2-chat-1.8b-hf | 4.52 | 5.04 | 3.99 | 52 | 10 | 364 | 726 | 29 | 4 | 172 | 371 | 23 | 6 | 192 | 355 |
| internlm2-chat-1.8b-sft-hf | 3.56 | 3.83 | 3.30 | 41 | 12 | 403 | 696 | 22 | 6 | 211 | 337 | 19 | 6 | 192 | 359 |
| internlm2-chat-7b-hf | 14.60 | 13.74 | 15.45 | 168 | 12 | 238 | 734 | 79 | 7 | 142 | 348 | 89 | 5 | 96 | 386 |
| internlm2-chat-7b-sft-hf | 14.34 | 14.61 | 14.06 | 165 | 9 | 275 | 703 | 84 | 3 | 174 | 315 | 81 | 6 | 101 | 388 |
| internlm2-chat-20b-hf | 19.64 | 20.00 | 19.27 | 226 | 11 | 191 | 724 | 115 | 7 | 83 | 371 | 111 | 4 | 108 | 353 |
| internlm2-chat-20b-sft-hf | 20.55 | 19.91 | 21.18 | 237 | 11 | 195 | 709 | 115 | 6 | 94 | 361 | 122 | 5 | 101 | 348 |
| llama-3-8b-instruct-hf | 28.50 | 29.04 | 27.95 | 328 | 17 | 95 | 712 | 167 | 7 | 44 | 358 | 161 | 10 | 51 | 354 |
| llama-3-70b-instruct-hf | 45.44 | 46.09 | 44.79 | 523 | 8 | 52 | 569 | 265 | 2 | 25 | 284 | 258 | 6 | 27 | 285 |
| llama-3-8b-instruct-lmdeploy | 29.02 | 29.39 | 28.65 | 334 | 19 | 94 | 705 | 169 | 11 | 42 | 354 | 165 | 8 | 52 | 351 |
| llama-3-70b-instruct-lmdeploy | 44.66 | 46.78 | 42.53 | 514 | 11 | 44 | 583 | 269 | 5 | 19 | 283 | 245 | 6 | 25 | 300 |
| mistral-7b-instruct-v0.1-hf | 9.82 | 10.78 | 8.85 | 113 | 17 | 316 | 706 | 62 | 9 | 152 | 353 | 51 | 8 | 164 | 353 |
| mistral-7b-instruct-v0.2-hf | 7.90 | 6.26 | 9.55 | 91 | 8 | 572 | 481 | 36 | 4 | 345 | 191 | 55 | 4 | 227 | 290 |
| mixtral-8x7b-instruct-v0.1-hf | 16.29 | 15.91 | 16.67 | 188 | 13 | 370 | 581 | 92 | 6 | 241 | 237 | 96 | 7 | 129 | 344 |
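A note on the aggregate column: judging from the numbers, `lcbench/pass@1` appears to be the unweighted mean of the `en` and `cn` pass@1 columns rather than a pooled pass rate; this is inferred from the tables, not from documentation. A quick spot-check:

```python
# Spot-check (values copied from the tables above): the aggregate pass@1
# matches the mean of the per-language pass@1 values up to rounding.
rows = {
    'llama-3-70b-turbomind': (38.43, 38.54, 38.49),
    'llama-3-70b-instruct-hf': (46.09, 44.79, 45.44),
}
for model, (en, cn, reported) in rows.items():
    assert abs((en + cn) / 2 - reported) < 0.01, model
```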
from mmengine.config import read_base

with read_base():
    from .lcbench_gen_5ff288 import LCBench_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LCDataset, LCPassKEvaluator
LC_reader_cfg = dict(
input_columns=['text', 'test_list'], output_column='test_column')
LC_en_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with [\"a\",\"b\",\"c\"], we need to push the key one time to type \"a\", two times to type \"b\", and three times to type \"c\" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes(\"abcde\") == 5 \n assert minimumPushes(\"xyzxyzxyzxyz\") == 12 \n assert minimumPushes(\"aabbccddeeffgghhiiiiii\") == 24 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'
),
dict(role='BOT', prompt='[BEGIN]\n'),
], )),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512))
LC_cn_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 [\"a\",\"b\",\"c\"],我们需要按一次键来输入 \"a\",按两次键来输入 \"b\",按三次键来输入 \"c\"\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes(\"abcde\") == 5 \n assert minimumPushes(\"xyzxyzxyzxyz\") == 12 \n assert minimumPushes(\"aabbccddeeffgghhiiiiii\") == 24 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'
),
dict(role='BOT', prompt='[BEGIN]\n'),
], )),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512))
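# Score generated code against its test cases; pred_role='BOT' tells the
# evaluator to extract the model's completion from the BOT turn.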
LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')
LCBench_datasets = [
dict(
type=LCDataset,
abbr='lcbench_en',
path='./data/LCBench2023/LCBench2023.jsonl',
num_repeats=1,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_en_infer_cfg,
eval_cfg=LC_eval_cfg),
dict(
type=LCDataset,
abbr='lcbench_cn',
path='./data/LCBench2023/LCBench2023_cn.jsonl',
num_repeats=1,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_cn_infer_cfg,
eval_cfg=LC_eval_cfg)
]
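# A minimal usage sketch (hedged; the run-config filename is hypothetical):
# an OpenCompass evaluation config would pull these datasets in the same
# read_base style used throughout this commit, e.g.
#
#     from mmengine.config import read_base
#     with read_base():
#         from .lcbench_gen import LCBench_datasets
#     datasets = [*LCBench_datasets]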
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LCDataset, LCPassKEvaluator
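# Difficulty-split variant: identical prompts to the config above, but one
# en and one cn dataset is emitted per difficulty level listed below.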
LC_difficulties_list = ['EASY', 'MEDIUM', 'HARD']
LC_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_column')
LC_en_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "),
dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with ["a","b","c"], we need to push the key one time to type "a", two times to type "b", and three times to type "c" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "),
dict(role='HUMAN', prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"),
dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "),
dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
dict(role='BOT', prompt='[BEGIN]\n'),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
LC_cn_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "),
dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 ["a","b","c"],我们需要按一次键来输入 "a",按两次键来输入 "b",按三次键来输入 "c"。\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "),
dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'),
dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "),
dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'),
dict(role='BOT', prompt='[BEGIN]\n'),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')
LCBench_datasets = []
for difficulty in LC_difficulties_list:
LCBench_datasets.append(
dict(
type=LCDataset,
abbr='lcbench_en-' + difficulty,
path='data/LCBench2023/LCBench2023.jsonl',
difficulty=difficulty,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_en_infer_cfg,
eval_cfg=LC_eval_cfg,
)
)
LCBench_datasets.append(
dict(
type=LCDataset,
abbr='lcbench_cn-' + difficulty,
path='data/LCBench2023/LCBench2023_cn.jsonl',
difficulty=difficulty,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_cn_infer_cfg,
eval_cfg=LC_eval_cfg,
)
)
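# The loop above yields six datasets: {EASY, MEDIUM, HARD} x {en, cn}.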
from mmengine.config import read_base
with read_base():
from .lcbench_repeat10_gen_5ff288 import LCBench_repeat10_datasets # noqa: F401, F403
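# repeat10 variant: same prompts as lcbench_gen, but each problem is
# sampled 10 times (num_repeats=10), which gives LCPassKEvaluator multiple
# completions per problem to aggregate.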
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LCDataset, LCPassKEvaluator
LC_reader_cfg = dict(
input_columns=['text', 'test_list'], output_column='test_column')
LC_en_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with [\"a\",\"b\",\"c\"], we need to push the key one time to type \"a\", two times to type \"b\", and three times to type \"c\" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes(\"abcde\") == 5 \n assert minimumPushes(\"xyzxyzxyzxyz\") == 12 \n assert minimumPushes(\"aabbccddeeffgghhiiiiii\") == 24 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'
),
dict(role='BOT', prompt='[BEGIN]\n'),
], )),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512))
LC_cn_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n x, y = min(x, y), max(x, y)\n A = [0] * n\n for i in range(1, n + 1):\n A[0] += 2 \n A[min(i - 1, abs(i - y) + x)] -= 1 \n A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n A[min(abs(i - x), abs(y - i) + 1)] += 1 \n A[min(abs(i - x) + 1, abs(y - i))] += 1 \n r = max(x - i, 0) + max(i - y, 0)\n A[r + (y - x + 0) // 2] -= 1 \n A[r + (y - x + 1) // 2] -= 1 \n return list(accumulate(A))' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt="你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 [\"a\",\"b\",\"c\"],我们需要按一次键来输入 \"a\",按两次键来输入 \"b\",按三次键来输入 \"c\"\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes(\"abcde\") == 5 \n assert minimumPushes(\"xyzxyzxyzxyz\") == 12 \n assert minimumPushes(\"aabbccddeeffgghhiiiiii\") == 24 \n"
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def minimumPushes(word):\n letter_counts = {}\n for c in word:\n letter_counts[c] = letter_counts.get(c, 0) + 1\n counts = list(letter_counts.values())\n counts.sort(reverse=True)\n ans, row = 0, 1\n for i in range(len(counts)):\n if i > 7 and i % 8 == 0:\n row += 1\n ans += row * counts[i]\n return ans' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'
),
dict(
role='BOT',
prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n def check(v):\n A = list(map(int, bin(v)[2:]))\n n = len(A)\n res = p = 0\n for i,v in enumerate(A):\n if v == 1:\n l = n - i - 1\n res += (p << l) + ((l // x) << (l - 1) if l else 0)\n if (n - i) % x == 0:\n p += v\n return res + p\n l, r = 1, 10 ** 15\n while l < r:\n mid = (l + r + 1) // 2\n if check(mid) <= k:\n l = mid\n else:\n r = mid - 1\n return l' \n[DONE] \n\n "
),
dict(
role='HUMAN',
prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'
),
dict(role='BOT', prompt='[BEGIN]\n'),
], )),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512))
LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')
LCBench_repeat10_datasets = [
dict(
type=LCDataset,
abbr='lcbench_en_repeat10',
path='./data/LCBench2023/LCBench2023.jsonl',
num_repeats=10,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_en_infer_cfg,
eval_cfg=LC_eval_cfg),
dict(
type=LCDataset,
abbr='lcbench_cn_repeat10',
path='./data/LCBench2023/LCBench2023_cn.jsonl',
num_repeats=10,
reader_cfg=LC_reader_cfg,
infer_cfg=LC_cn_infer_cfg,
eval_cfg=LC_eval_cfg)
]
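# Hedged sketch: with num_repeats=10 the evaluator sees n = 10 completions
# per problem, so a pass@k score can be estimated. The helper below shows
# the standard unbiased estimator (Chen et al., 2021); it is illustrative
# only, not part of this config or of LCPassKEvaluator's actual code.
def _pass_at_k(n: int, c: int, k: int) -> float:
    """pass@k = 1 - C(n - c, k) / C(n, k), computed as a stable product.

    n: completions drawn per problem; c: completions that passed; k <= n.
    """
    if n - c < k:
        return 1.0  # every size-k subset must contain a passing completion
    p = 1.0
    for i in range(k):
        p *= (n - c - i) / (n - i)
    return 1.0 - p
# e.g. _pass_at_k(10, 3, 1) -> 0.3 (= c/n for k = 1); _pass_at_k(10, 3, 10) -> 1.0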
from mmengine.config import read_base
with read_base():
from .MMLUArabic_gen_326684 import MMLUArabic_datasets # noqa: F401, F403
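# MMLUArabic stub: re-exports the dataset list from its pinned prompt
# version, mirroring the LCBench stubs above.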