"src/vscode:/vscode.git/clone" did not exist on "4c42a46dc04b43157b9894c9546bf05aa0e27c2c"
Commit c289ecc0 authored by xinghao's avatar xinghao
Browse files

Initial commit

parents
Pipeline #3004 canceled with stages
from mmengine.config import read_base
with read_base():
    from .mathbench_2024_gen_19e486 import mathbench_datasets # noqa: F401, F403
zero_shot_prompts = {
'single_choice_cn_with_reasoning': [
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:'),
],
'single_choice_cn': [
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n所以答案是:'),
],
'single_choice_en_with_reasoning': [
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with "Therefore, the correct answer is option X", Where "X" is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet\'s think step by step:'),
],
'single_choice_en': [
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nThe answer is:'),
],
'cloze_en': [
dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
],
'cloze_cn': [
dict(role='HUMAN', prompt='{question}\n请一步一步推理,并在最后用\\boxed{}给出你的答案。'),
]
}
few_shot_prompts = {
'single_choice_cn': [
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____?\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
dict(role='BOT', prompt='回答: D'),
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则$\\omega$的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
dict(role='BOT', prompt='回答: D'),
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
dict(role='BOT', prompt='回答: A'),
dict(role='HUMAN', prompt='问题: 圆锥的底面半径为2,高为4.一个圆柱的下底面在圆锥的底面上,上底面的圆周在圆锥的侧面上,当圆柱侧面积为$4 \\pi$时,该圆柱的体积为____\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
dict(role='BOT', prompt='回答: B'),
dict(role='HUMAN', prompt='问题: {question}'),
dict(role='BOT', prompt='回答: {answer}'),
],
'single_choice_cn_with_reasoning' : [
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
dict(role='BOT', prompt='回答: 因为首先,我们将等式两边同时乘以$(3 + i)$的共轭复数$(3 - i)$,以便消去分母中的虚数部分:$z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$,这样做的目的是利用复数乘法的性质,从而消去分母中的虚数部分。我们进行乘法运算:$z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$,$z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$,由于$i^2 = -1$,我们可以进一步简化:$z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{1}{2} - \\frac{1}{2}i$,在复平面上,这个点位于第四象限,因为它的实部是正的($\\frac{1}{2}$),而虚部是负的($-\\frac{1}{2}$)\n答案选项:D'),
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则$\\omega$的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
dict(role='BOT', prompt='回答: 将函数$y=\\tan(\\omega x-1)$(其中$\\omega>0$)的图像向左平移2个单位长度后,得到的新函数为$y=\\tan(\\omega(x+2)-1)$。为了使这个新函数的图像与$y=\\tan(\\omega x+3)$的图像重合,我们需要满足以下条件:\n$\\tan(\\omega(x+2)-1) = \\tan(\\omega x+3)$\n由于正切函数的周期性,我们知道$\\tan(\\theta) = \\tan(\\theta + k\\pi)$,其中$k$是任意整数。因此,我们可以写出:\n$\\omega(x+2)-1 = \\omega x+3 + k\\pi$\n解这个方程,我们得到:$\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$\n$2\\omega - 1 = 3 + k\\pi$\n$\\omega = 2 + \\frac{k\\pi}{2}$\n由于$\\omega>0$,我们需要找到满足条件的最小正整数$k$。当$k=0$时,我们得到$\\omega=2$,这是满足条件的最小正值。因此,最小值等于2。答案选项:D'),
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
dict(role='BOT', prompt='回答: 我们可以将这个方程写成:$a + 2ai + b = 2i$,然后将实部和虚部分开:\n$a + b = 0$\n$2a = 2$\n由于$a + b = 0$,我们可以得到$a = -b$。将这个值代入$2a = 2$,我们得到$2(-b) = 2$,因此$b = -1$。因此,$a = 1$。答案选项:A'),
dict(role='HUMAN', prompt='问题: 一直角三角形的两边长分别为3和4.则第三边的长为____\nA. 5\nB. $\\sqrt 7$\nC. $\\sqrt 5$\nD. 5或者$\\sqrt 7$'),
dict(role='BOT', prompt='回答: 我们可以使用勾股定理来解决这个问题。勾股定理告诉我们,如果一个三角形的两条边长分别为$a$和$b$,那么第三条边的长度为$c = \\sqrt{a^2 + b^2}$。在这个问题中,$a = 3$,$b = 4$,因此$c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$。答案选项:A'),
dict(role='HUMAN', prompt='问题: {question}'),
dict(role='BOT', prompt='回答: {answer}'),
],
'single_choice_en': [
dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
dict(role='BOT', prompt='Response: D'),
dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
dict(role='BOT', prompt='Response: D'),
dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
dict(role='BOT', prompt='Response: A'),
dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
dict(role='BOT', prompt='Response: B'),
dict(role='HUMAN', prompt='Question: {question}'),
dict(role='BOT', prompt='Response: {answer}'),
],
'single_choice_en_with_reasoning': [
dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
dict(role='BOT', prompt='Response: First, we multiply both sides of the equation by the conjugate of $(3 + i)$: $z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$. We perform the multiplication: $z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$, $z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$. Since $i^2 = -1$, we can simplify this further: $z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{1}{2} - \\frac{1}{2}i$. In the complex plane, this point is located in the fourth quadrant, because its real part is positive ($\\frac{1}{2}$) and its imaginary part is negative ($-\\frac{1}{2}$)\nAnswer option: D'),
dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
dict(role='BOT', prompt='Response: In order for the graph of this new function to coincide with the graph of $y = \\tan(\\omega x + 3)$, we need to satisfy the following condition: $\\tan(\\omega(x + 2) - 1) = \\tan(\\omega x + 3)$. Therefore, we can write: $\\omega(x + 2) - 1 = \\omega x + 3 + k\\pi$. Solving this equation, we get: $\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$. $2\\omega - 1 = 3 + k\\pi$. $\\omega = 2 + \\frac{k\\pi}{2}$. Since $\\omega > 0$, we need to find the smallest positive integer $k$ that satisfies the condition. When $k = 0$, we get $\\omega = 2$, which is the smallest positive value that satisfies the condition. Therefore, the minimum value is 2. Answer option: D'),
dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
dict(role='BOT', prompt='Response: We can write this equation as: $a + 2ai + b = 2i$, and then separate the real and imaginary parts: $a + b = 0$. $2a = 2$. Since $a + b = 0$, we can get $a = -b$. Substituting this value into $2a = 2$, we get $2(-b) = 2$, so $b = -1$. Therefore, $a = 1$. Answer option: A'),
dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
dict(role='BOT', prompt='Response: We can use the Pythagorean theorem to solve this problem. The Pythagorean theorem tells us that if the two sides of a triangle are $a$ and $b$, then the length of the third side is $c = \\sqrt{a^2 + b^2}$. In this problem, $a = 3$ and $b = 4$, so $c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$. Answer option: A'),
dict(role='HUMAN', prompt='Question: {question}'),
dict(role='BOT', prompt='Response: {answer}'),
],
'cloze_cn': [
dict(role='HUMAN', prompt='问题: 莎莎有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
dict(role='BOT', prompt='回答: 她有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9'),
dict(role='HUMAN', prompt='问题: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
dict(role='BOT', prompt='回答: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29'),
dict(role='HUMAN', prompt='问题: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
dict(role='BOT', prompt='回答: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33'),
dict(role='HUMAN', prompt='问题: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
dict(role='BOT', prompt='回答: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8'),
dict(role='HUMAN', prompt='问题: {question}'),
dict(role='BOT', prompt='回答: {answer}'),
],
'cloze_en': [
dict(role='HUMAN', prompt='Question: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
dict(role='BOT', prompt='Response: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.'),
dict(role='HUMAN', prompt='Question: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
dict(role='BOT', prompt='Response: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.'),
dict(role='HUMAN', prompt='Question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
dict(role='BOT', prompt='Response: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.'),
dict(role='HUMAN', prompt='Question: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
dict(role='BOT', prompt='Response: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.'),
dict(role='HUMAN', prompt='Question: {question}'),
dict(role='BOT', prompt='Response: {answer}'),
],
}
mathbench_sets = {
# Practice Part
'college': ['single_choice_cn', 'single_choice_en'],
'high': ['single_choice_cn', 'single_choice_en'],
'middle': ['single_choice_cn', 'single_choice_en'],
'primary': ['cloze_cn', 'cloze_en'],
'arithmetic': ['cloze_en'],
# Theory part
'college_knowledge': ['single_choice_cn', 'single_choice_en'],
'high_knowledge': ['single_choice_cn', 'single_choice_en'],
'middle_knowledge': ['single_choice_cn', 'single_choice_en'],
'primary_knowledge': ['single_choice_cn', 'single_choice_en'],
}
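# Illustrative sketch only (not part of the original config): the imported
# mathbench_2024_gen_19e486 config is assumed to combine `mathbench_sets` with
# the prompt dicts above roughly as follows; names prefixed with `_example`
# are hypothetical.
_example_entries = []
for _split, _prompt_types in mathbench_sets.items():
    for _prompt_type in _prompt_types:
        _example_entries.append(dict(
            abbr=f'mathbench-{_split}-{_prompt_type}',
            # zero-shot prompts shown here; few_shot_prompts would be used
            # when building the few-shot variant of the same split
            prompt_messages=zero_shot_prompts[_prompt_type],
        ))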
from mmengine.config import read_base
with read_base():
    from .medbench_gen_d44f24 import medbench_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MedBenchDataset, MedBenchEvaluator, MedBenchEvaluator_Cloze, MedBenchEvaluator_CMeEE, MedBenchEvaluator_CMeIE, MedBenchEvaluator_CHIP_CDEE, MedBenchEvaluator_CHIP_CDN, MedBenchEvaluator_CHIP_CTC, MedBenchEvaluator_NLG, MedBenchEvaluator_TF, MedBenchEvaluator_DBMHG, MedBenchEvaluator_SMDoc, MedBenchEvaluator_IMCS_V2_MRG
from opencompass.utils.text_postprocessors import first_capital_postprocess
medbench_reader_cfg = dict(
input_columns=['problem_input'], output_column='label')
medbench_multiple_choices_sets = ['Med-Exam', 'DDx-basic', 'DDx-advanced', 'MedSafety'] # multiple-choice questions, scored with accuracy
medbench_qa_sets = ['MedHC', 'MedMC', 'MedDG', 'MedSpeQA', 'MedTreat', 'CMB-Clin'] # open-ended QA with reference answers
medbench_cloze_sets = ['MedHG'] # closed-domain QA with reference answers
medbench_single_choice_sets = ['DrugCA'] # correct/incorrect judgement with reference answers
medbench_ie_sets = ['DBMHG', 'CMeEE', 'CMeIE', 'CHIP-CDEE', 'CHIP-CDN', 'CHIP-CTC', 'SMDoc', 'IMCS-V2-MRG'] # check whether extracted entities match the reference, scored with F1
medbench_datasets = []
for name in medbench_single_choice_sets + medbench_multiple_choices_sets:
medbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[dict(role='HUMAN', prompt='{problem_input}')])),
retriever=dict(type=ZeroRetriever
), # the retriever has no effect; zero-shot / few-shot is determined by the input arguments
inferencer=dict(type=GenInferencer))
medbench_eval_cfg = dict(
evaluator=dict(type=MedBenchEvaluator), pred_role='BOT')
medbench_datasets.append(
dict(
type=MedBenchDataset,
path='./data/MedBench/' + name,
name=name,
abbr='medbench-' + name,
setting_name='zero-shot',
reader_cfg=medbench_reader_cfg,
infer_cfg=medbench_infer_cfg.copy(),
eval_cfg=medbench_eval_cfg.copy()))
for name in medbench_qa_sets:
medbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[dict(role='HUMAN', prompt='{problem_input}')])),
retriever=dict(type=ZeroRetriever
), # the retriever has no effect; zero-shot / few-shot is determined by the input arguments
inferencer=dict(type=GenInferencer))
medbench_eval_cfg = dict(
evaluator=dict(type=MedBenchEvaluator_NLG), pred_role='BOT')
medbench_datasets.append(
dict(
type=MedBenchDataset,
path='./data/MedBench/' + name,
name=name,
abbr='medbench-' + name,
setting_name='zero-shot',
reader_cfg=medbench_reader_cfg,
infer_cfg=medbench_infer_cfg.copy(),
eval_cfg=medbench_eval_cfg.copy()))
for name in medbench_cloze_sets:
medbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[dict(role='HUMAN', prompt='{problem_input}')])),
retriever=dict(type=ZeroRetriever
), # the retriever has no effect; zero-shot / few-shot is determined by the input arguments
inferencer=dict(type=GenInferencer))
medbench_eval_cfg = dict(
evaluator=dict(type=MedBenchEvaluator_Cloze), pred_role='BOT')
medbench_datasets.append(
dict(
type=MedBenchDataset,
path='./data/MedBench/' + name,
name=name,
abbr='medbench-' + name,
setting_name='zero-shot',
reader_cfg=medbench_reader_cfg,
infer_cfg=medbench_infer_cfg.copy(),
eval_cfg=medbench_eval_cfg.copy()))
for name in medbench_ie_sets:
medbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[dict(role='HUMAN', prompt='{problem_input}')])),
retriever=dict(type=ZeroRetriever
), # the retriever has no effect; zero-shot / few-shot is determined by the input arguments
inferencer=dict(type=GenInferencer))
medbench_eval_cfg = dict(
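# Select the evaluator class that matches the IE task name, e.g. 'CHIP-CDEE' -> MedBenchEvaluator_CHIP_CDEE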
evaluator=dict(type=eval('MedBenchEvaluator_'+name.replace('-', '_'))), pred_role='BOT')
medbench_datasets.append(
dict(
type=MedBenchDataset,
path='./data/MedBench/' + name,
name=name,
abbr='medbench-' + name,
setting_name='zero-shot',
reader_cfg=medbench_reader_cfg,
infer_cfg=medbench_infer_cfg.copy(),
eval_cfg=medbench_eval_cfg.copy()))
del name, medbench_infer_cfg, medbench_eval_cfg
from opencompass.datasets import MedCalc_BenchDataset, MedCalcOfficial_Evaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
ZERO_SHOT_PROMPT = 'You are a helpful assistant for calculating a score for a given patient note. Please think step-by-step to solve the question and then generate the required score. Your output should only contain a JSON dict formatted as {"step_by_step_thinking": str(your_step_by_step_thinking_procress_to_solve_the_question), "answer": str(short_and_direct_answer_of_the_question)}. \n Here is the patient note:\n{patient_note}\n\nHere is the task:\n{question}\n\nPlease directly output the JSON dict formatted as {"step_by_step_thinking": str(your_step_by_step_thinking_procress_to_solve_the_question), "answer": str(short_and_direct_answer_of_the_question)}:'
# Reader configuration
reader_cfg = dict(
input_columns=[
'row_number',
'calculator_id',
'calculator_name',
'category',
'note_id',
'output_type',
'note_type',
'patient_note',
'question',
'relevant_entities',
'ground_truth_answer',
'lower_limit',
'upper_limit',
'ground_truth_explanation'
],
output_column='ground_truth_answer',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN',prompt=ZERO_SHOT_PROMPT),
])
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=MedCalcOfficial_Evaluator),
pred_role='BOT',
)
medcal_bench_dataset = dict(
type=MedCalc_BenchDataset,
abbr='medcal_bench_official_zero_shot_eval',
path='ncbi/MedCalc-Bench-v1.0',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
medcal_bench_datasets = [medcal_bench_dataset]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess
from opencompass.datasets.MedQA import MedQADataset
QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering.
Question:\n
{question}
Options:\n
{choices}
""".strip()
MedQA_datasets = []
MedQA_reader_cfg = dict(
input_columns=['question', 'choices'],
output_column='label',
)
MedQA_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt=QUERY_TEMPLATE),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
MedQA_subsets = {
'US': 'xuxuxuxuxu/MedQA_US_test',
'Mainland': 'xuxuxuxuxu/MedQA_Mainland_test',
'Taiwan': 'xuxuxuxuxu/MedQA_Taiwan_test',
}
for split in list(MedQA_subsets.keys()):
MedQA_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD')
)
MedQA_datasets.append(
dict(
abbr=f'MedQA_{split}',
type=MedQADataset,
path=MedQA_subsets[split],
reader_cfg=MedQA_reader_cfg,
infer_cfg=MedQA_infer_cfg,
eval_cfg=MedQA_eval_cfg,
)
)
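if __name__ == '__main__':
    # Illustrative only (not part of the config): how a QUERY_TEMPLATE prompt
    # is rendered for one made-up item; at run time this substitution is
    # performed by PromptTemplate.
    print(QUERY_TEMPLATE.format(
        question='Which vitamin deficiency classically causes scurvy?',
        choices='A. Vitamin A\nB. Vitamin B12\nC. Vitamin C\nD. Vitamin D',
    ))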
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets.MedQA import MedQADataset
QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering.
Question:\n
{question}
Options:\n
{choices}
""".strip()
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: {question}\n {choices} \n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
MedQA_datasets = []
MedQA_reader_cfg = dict(
input_columns=['question', 'choices'],
output_column='label',
)
MedQA_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt=QUERY_TEMPLATE),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
MedQA_subsets = {
'US': 'xuxuxuxuxu/MedQA_US_test',
'Mainland': 'xuxuxuxuxu/MedQA_Mainland_test',
'Taiwan': 'xuxuxuxuxu/MedQA_Taiwan_test',
}
for split in list(MedQA_subsets.keys()):
MedQA_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=MedQADataset,
path=MedQA_subsets[split],
reader_cfg=MedQA_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
MedQA_datasets.append(
dict(
abbr=f'MedQA_{split}',
type=MedQADataset,
path=MedQA_subsets[split],
reader_cfg=MedQA_reader_cfg,
infer_cfg=MedQA_infer_cfg,
eval_cfg=MedQA_eval_cfg,
)
)
from opencompass.datasets import MedXpertQADataset, MedXpertQAEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n' # Where to put this?
ZERO_SHOT_PROMPT = 'Q: {question}\nA: Among {start} through {end}, the answer is'
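# {start} and {end} are presumably filled in by MedXpertQADataset with the first and last option letters of each item (they are not listed in reader_cfg below).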
# Reader configuration
reader_cfg = dict(
input_columns=[
'question',
'options',
'medical_task',
'body_system',
'question_type',
'prompt_mode',
],
output_column='label',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
],
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=MedXpertQAEvaluator),
pred_role='BOT',
)
medxpertqa_dataset = dict(
type=MedXpertQADataset,
abbr='medxpertqa',
path='TsinghuaC3I/MedXpertQA',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
medxpertqa_datasets = [medxpertqa_dataset]
from opencompass.datasets import MedXpertQADataset, MedXpertQA_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator
SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n' # Where to put this?
ZERO_SHOT_PROMPT = 'Q: {question}\nA: Among {start} through {end}, the answer is'
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: Q: {question}\nA: Among {start} through {end}, the answer is\n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Reader configuration
reader_cfg = dict(
input_columns=[
'question',
'options',
'medical_task',
'body_system',
'question_type',
'prompt_mode',
],
output_column='label',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
],
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=MedXpertQADataset,
path='TsinghuaC3I/MedXpertQA',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=MedXpertQA_llmjudge_postprocess),
),
)
medxpertqa_dataset = dict(
type=MedXpertQADataset,
abbr='medxpertqa',
path='TsinghuaC3I/MedXpertQA',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
medxpertqa_datasets = [medxpertqa_dataset]
from mmengine.config import read_base
with read_base():
    from .medbullets_gen_60c8f5 import medbullets_datasets # noqa: F401, F403
from opencompass.datasets import MedbulletsDataset, MedbulletsEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
import os
SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n' # Where to put this?
ZERO_SHOT_PROMPT = 'Q: {question}\n Please select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'
# Reader configuration
reader_cfg = dict(
input_columns=[
'question',
'options',
'question_type',
'prompt_mode',
],
output_column='label',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
],
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=MedbulletsEvaluator),
pred_role='BOT',
)
medbullets_dataset = dict(
type=MedbulletsDataset,
abbr='medbullets',
path='opencompass/medbullets',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
medbullets_datasets = [medbullets_dataset]
from mmengine.config import read_base
with read_base():
    from .medbullets_llmjudge_gen_60c8f5 import medbullets_datasets # noqa: F401, F403
from opencompass.datasets import MedbulletsDataset, medbullets_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator
import os
SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n' # Where to put this?
ZERO_SHOT_PROMPT = 'Q: {question}\n Please select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n\n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Reader configuration
reader_cfg = dict(
input_columns=[
'question',
'options',
'question_type',
'prompt_mode',
],
output_column='label',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
],
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=MedbulletsDataset,
path='opencompass/medbullets',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=medbullets_llmjudge_postprocess),
),
)
medbullets_dataset = dict(
type=MedbulletsDataset,
abbr='medbullets',
path='opencompass/medbullets',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
medbullets_datasets = [medbullets_dataset]
from mmengine.config import read_base
with read_base():
    from .NPHardEval_gen_22aac5 import NPHardEval_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.NPHardEval import (
HardGCPDataset, HardGCPEvaluator,
Hard_TSP_Dataset, Hard_TSP_Evaluator,
Hard_MSP_Dataset, Hard_MSP_Evaluator,
CMP_GCP_D_Dataset, CMP_GCP_D_Evaluator,
CMP_TSP_D_Dataset, CMP_TSP_D_Evaluator,
CMP_KSP_Dataset, CMP_KSP_Evaluator,
P_BSP_Dataset, P_BSP_Evaluator,
P_EDP_Dataset, P_EDP_Evaluator,
P_SPP_Dataset, P_SPP_Evaluator,
)
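# Each task entry: [dataset abbr, data subdirectory under ./data/NPHardEval/, dataset class, evaluator class]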
NPHardEval_tasks = [
['hard_GCP', 'GCP', HardGCPDataset, HardGCPEvaluator],
['hard_TSP', 'TSP', Hard_TSP_Dataset, Hard_TSP_Evaluator],
['hard_MSP', 'MSP', Hard_MSP_Dataset, Hard_MSP_Evaluator],
['cmp_GCP_D', 'GCP_Decision', CMP_GCP_D_Dataset, CMP_GCP_D_Evaluator],
['cmp_TSP_D', 'TSP_Decision', CMP_TSP_D_Dataset, CMP_TSP_D_Evaluator],
['cmp_KSP', 'KSP', CMP_KSP_Dataset, CMP_KSP_Evaluator],
['p_BSP', 'BSP', P_BSP_Dataset, P_BSP_Evaluator],
['p_EDP', 'EDP', P_EDP_Dataset, P_EDP_Evaluator],
['p_SPP', 'SPP', P_SPP_Dataset, P_SPP_Evaluator],
]
NPHardEval_datasets = []
for name, path_name, dataset, evaluator in NPHardEval_tasks:
NPHardEval_reader_cfg = dict(input_columns=['prompt', 'level'], output_column='q')
NPHardEval_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(
begin='</E>',
round=[
dict(role='HUMAN', prompt='</E>{prompt}'),
dict(role='BOT', prompt=''),
],
),
ice_token='</E>',
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
NPHardEval_eval_cfg = dict(evaluator=dict(type=evaluator), pred_role='BOT')
NPHardEval_datasets.append(
dict(
type=dataset,
abbr=name,
path=f'./data/NPHardEval/{path_name}/',
reader_cfg=NPHardEval_reader_cfg,
infer_cfg=NPHardEval_infer_cfg,
eval_cfg=NPHardEval_eval_cfg,
)
)
# NPHardEval
## Introduction
The following introduction comes from the abstract of [NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes](https://arxiv.org/abs/2312.14890):
```
This benchmark is designed to evaluate the reasoning abilities of LLMs across a broad spectrum of 900 algorithmic questions, extending up to the NP-Hard complexity class. These questions are meticulously chosen to represent a wide range of complexity class below the NP-hard complexity class, offering a rigorous measure of the reasoning ability of LLMs.
```
## Official link
### Paper
[NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes](https://arxiv.org/abs/2312.14890)
### Repository
[NPHardEval](https://github.com/casmlab/NPHardEval)
## Examples
Input example I:
```
The Graph Coloring Problem (Decision Version, GCP-D) involves determining if it is possible to color the vertices of a graph using a given number of colors, ensuring no two adjacent vertices have the same color.
Find out if the vertices of a graph with 6 vertices can be colored using only 3 colors, such that no adjacent vertices share the same color.
Provide a yes or no answer, along with a concise explanation of your reasoning. Keep your explanation focused and brief.
Enclose your output in <root></root> tags. Include your reasoning in <reasoning></reasoning> tags and the final yes/no answer in <final_answer></final_answer> tags, like <final_answer>{'Feasible': 'YES_OR_NO'}</final_answer>.
The graph is below:
Vertex 2 is connected to vertex 4.
Vertex 2 is connected to vertex 5.
Vertex 3 is connected to vertex 5.
Vertex 4 is connected to vertex 5.
Vertex 5 is connected to vertex 6.
```
Output example I (from GPT-4):
```
<root>
<final_answer>{'Feasible': 'YES'}</final_answer>
<reasoning>Yes, it is possible to color the vertices of the given graph with 6 vertices using only 3 colors, such that no adjacent vertices share the same color. The graph does not contain any triangles or cycles of length greater than 3, which means it is a planar graph. By the Four Color Theorem, any planar graph can be colored using at most 4 colors. Since we have only 3 colors available, it is feasible to color the vertices without any adjacent vertices sharing the same color.</reasoning>
</root>
```
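For reference, the feasibility label for this instance can be checked mechanically. The sketch below (illustrative only, not part of the benchmark) brute-forces every 3-coloring of the listed graph:
```python
# Brute-force 3-colorability check for the graph in Input example I.
from itertools import product

edges = [(2, 4), (2, 5), (3, 5), (4, 5), (5, 6)]
vertices = list(range(1, 7))  # vertices 1..6 (vertex 1 has no edges)

def is_proper(coloring):
    # True if no edge joins two vertices of the same color
    return all(coloring[u] != coloring[v] for u, v in edges)

feasible = any(
    is_proper(dict(zip(vertices, colors)))
    for colors in product(range(3), repeat=len(vertices))
)
print(feasible)  # True, i.e. {'Feasible': 'YES'}
```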
Input example II:
```
The Traveling Salesman Problem (Decision Version, TSP-D) focuses on determining if a salesman can complete a route, visiting each city at least once, with the total travel distance being less than a specified value.
Check if it's possible for a salesman to visit each of the 10 cities at least once and return to the starting city with the total distance less than 3481.5. The distances between each pair of cities are given.
Provide a yes or no answer, with a succinct explanation of your decision process. Focus on clarity and brevity in your response.
Enclose your output in <root></root> tags. Present your reasoning in <reasoning></reasoning> tags and the final yes/no answer in <final_answer></final_answer> tags, like <final_answer>{'Feasible': 'YES_OR_NO'}</final_answer>.
The distances between cities are below:
The distance between City 0 and City 1 is 58.0.
The distance between City 0 and City 2 is 171.0.
The distance between City 0 and City 3 is 122.0.
The distance between City 0 and City 4 is 109.0.
The distance between City 0 and City 5 is 93.0.
The distance between City 0 and City 6 is 106.0.
The distance between City 0 and City 7 is 52.0.
The distance between City 0 and City 8 is 115.0.
The distance between City 0 and City 9 is 148.0.
The distance between City 1 and City 2 is 145.0.
The distance between City 1 and City 3 is 71.0.
The distance between City 1 and City 4 is 114.0.
The distance between City 1 and City 5 is 69.0.
The distance between City 1 and City 6 is 163.0.
The distance between City 1 and City 7 is 132.0.
The distance between City 1 and City 8 is 183.0.
The distance between City 1 and City 9 is 115.0.
The distance between City 2 and City 3 is 107.0.
The distance between City 2 and City 4 is 163.0.
The distance between City 2 and City 5 is 63.0.
The distance between City 2 and City 6 is 91.0.
The distance between City 2 and City 7 is 108.0.
The distance between City 2 and City 8 is 109.0.
The distance between City 2 and City 9 is 77.0.
The distance between City 3 and City 4 is 119.0.
The distance between City 3 and City 5 is 131.0.
The distance between City 3 and City 6 is 79.0.
The distance between City 3 and City 7 is 159.0.
The distance between City 3 and City 8 is 27.0.
The distance between City 3 and City 9 is 64.0.
The distance between City 4 and City 5 is 85.0.
The distance between City 4 and City 6 is 110.0.
The distance between City 4 and City 7 is 71.0.
The distance between City 4 and City 8 is 61.0.
The distance between City 4 and City 9 is 61.0.
The distance between City 5 and City 6 is 73.0.
The distance between City 5 and City 7 is 132.0.
The distance between City 5 and City 8 is 82.0.
The distance between City 5 and City 9 is 96.0.
The distance between City 6 and City 7 is 156.0.
The distance between City 6 and City 8 is 103.0.
The distance between City 6 and City 9 is 69.0.
The distance between City 7 and City 8 is 108.0.
The distance between City 7 and City 9 is 97.0.
The distance between City 8 and City 9 is 75.0.
```
Output example II (from GPT-4):
```
<root> <reasoning> The Traveling Salesman Problem is a well-known problem in the field of computer science and operations research. It is a combinatorial optimization problem, which means it involves finding the best solution from a finite set of possible solutions. The problem is NP-hard, which means there is no known algorithm that can solve it in polynomial time. Therefore, to solve this problem, we would need to use an algorithm that can find the optimal solution, such as a brute force algorithm, a genetic algorithm, or a simulated annealing algorithm. However, these algorithms can be computationally expensive, especially for large problem sizes. In this case, we have 10 cities, which means there are 10! = 3,628,800 possible routes. Therefore, without the use of a computer, it would be impractical to manually check all possible routes to find the one with the total distance less than 3481.5. </reasoning> <final_answer>{'Feasible': 'UNKNOWN'}</final_answer> </root>
```
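Feasibility can likewise be verified exhaustively by fixing City 0 as the start and enumerating the 9! closed tours (illustrative sketch only; the distances are copied from the prompt above):
```python
# Exhaustive check for Input example II: is any closed tour shorter than 3481.5?
from itertools import permutations

D = {
    (0, 1): 58, (0, 2): 171, (0, 3): 122, (0, 4): 109, (0, 5): 93,
    (0, 6): 106, (0, 7): 52, (0, 8): 115, (0, 9): 148,
    (1, 2): 145, (1, 3): 71, (1, 4): 114, (1, 5): 69, (1, 6): 163,
    (1, 7): 132, (1, 8): 183, (1, 9): 115,
    (2, 3): 107, (2, 4): 163, (2, 5): 63, (2, 6): 91, (2, 7): 108,
    (2, 8): 109, (2, 9): 77,
    (3, 4): 119, (3, 5): 131, (3, 6): 79, (3, 7): 159, (3, 8): 27, (3, 9): 64,
    (4, 5): 85, (4, 6): 110, (4, 7): 71, (4, 8): 61, (4, 9): 61,
    (5, 6): 73, (5, 7): 132, (5, 8): 82, (5, 9): 96,
    (6, 7): 156, (6, 8): 103, (6, 9): 69,
    (7, 8): 108, (7, 9): 97,
    (8, 9): 75,
}

def dist(a, b):
    return D[(min(a, b), max(a, b))]

best = min(
    sum(dist(tour[i], tour[(i + 1) % 10]) for i in range(10))
    for tour in ((0,) + rest for rest in permutations(range(1, 10)))
)
print(best, best < 3481.5)  # the shortest closed tour is far below 3481.5 -> 'YES'
```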
## Evaluation results
```
dataset version metric mode internlm2-chat-7b-hf
--------- --------- ----------------- ------ ----------------------
hard_GCP 144a59 Weighted Accuracy gen 1.64
hard_TSP 144a59 Weighted Accuracy gen 0
hard_MSP 144a59 Weighted Accuracy gen 0
cmp_GCP_D 144a59 Weighted Accuracy gen 43.82
cmp_TSP_D 144a59 Weighted Accuracy gen 40.18
cmp_KSP 144a59 Weighted Accuracy gen 0
p_BSP 144a59 Weighted Accuracy gen 40.36
p_EDP 144a59 Weighted Accuracy gen 0
p_SPP 144a59 Weighted Accuracy gen 0
```
## Reference
```
@article{fan2023nphardeval,
title={NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes},
author={Fan, Lizhou and Hua, Wenyue and Li, Lingyao and Ling, Haoyang and Zhang, Yongfeng and Hemphill, Libby},
journal={arXiv preprint arXiv:2312.14890},
year={2023}
}
```
# OlymMATH
[GitHub Link](https://github.com/RUCAIBox/OlymMATH)
For the OlymMATH dataset, please refer to the paper:
Challenging the Boundaries of Reasoning: An Olympiad-Level Math Benchmark for Large Language Models, by Haoxiang Sun, Yingqian Min, Zhipeng Chen, Wayne Xin Zhao, Zheng Liu, Zhongyuan Wang, Lei Fang, and Ji-Rong Wen.
## How to evaluate OlymMATH with a model judge
Here is a simple example:
```python
from opencompass.models import OpenAISDK, OpenAI
from mmengine.config import read_base

with read_base():
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import models as qwen2_5_7b_instruct_model
    from opencompass.configs.datasets.OlymMATH.olymmath_gen import olymmath_datasets

################## Judge Config ##################
api_meta_template = dict(round=[
    dict(role='HUMAN', api_role='HUMAN'),
    dict(role='BOT', api_role='BOT', generate=True),
], )

judge_cfg = dict(
    # An API model with OpenAI API format is required for Judge
    abbr='qwen2-5-32B-Instruct',
    type=OpenAISDK,
    path='Qwen/Qwen2.5-32B-Instruct',
    key='sk-1234',
    openai_api_base=[
        'http://172.30.56.1:4000/v1',
    ],
    meta_template=api_meta_template,
    query_per_second=16,
    batch_size=1024,
    temperature=0.001,
    max_completion_tokens=32768,
    tokenizer_path='gpt-4o-2024-05-13',
    verbose=True,
    max_out_len=16384,
    max_seq_len=32768,
)

################## Model Config ##################
models = [*qwen2_5_7b_instruct_model]

################## Dataset Config ##################
datasets = [*olymmath_datasets]

# Set judge_cfg for evaluation
for item in datasets:
    item['infer_cfg']['inferencer']['max_out_len'] = 32768
    if 'judge_cfg' in item['eval_cfg']['evaluator']:
        item['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg

work_dir = './outputs/olymmath_llm_eval'
```
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import OlymMATHDataset
from opencompass.evaluator import (
CascadeEvaluator,
GenericLLMEvaluator,
MATHVerifyEvaluator
)
# ----------------------------- Detailed Config -----------------------------
math_reader_cfg = dict(input_columns=['problem'], output_column='answer', train_split='test')
math_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
sub_sets = ['en-hard', 'zh-hard', 'en-easy', 'zh-easy']
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Evaluation configuration
olymmath_datasets = []
for sub_set in sub_sets:
math_eval_cfg = dict(
evaluator=dict(
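# CascadeEvaluator: the rule-based MATHVerifyEvaluator runs first; with
# parallel=False, samples it cannot verify are re-judged by the LLM evaluator below.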
type=CascadeEvaluator,
rule_evaluator=dict(
type=MATHVerifyEvaluator,
),
llm_evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
round=[
dict(
role='HUMAN',
prompt = GRADER_TEMPLATE
),
]),
),
dataset_cfg=dict(
type=OlymMATHDataset,
path='RUC-AIBOX/OlymMATH',
reader_cfg=math_reader_cfg,
subset=sub_set,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
parallel=False,
),
)
olymmath_datasets.append(
dict(
type=OlymMATHDataset,
abbr=f'olymmath_{sub_set}',
path='RUC-AIBOX/OlymMATH',
reader_cfg=math_reader_cfg,
infer_cfg=math_infer_cfg,
eval_cfg=math_eval_cfg,
subset=sub_set,
n=1
)
)
from mmengine.config import read_base
with read_base():
    # By default, use an LLM as the judge
    from .olymmath_llmverify_gen_97b203 import olymmath_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import OlymMATHDataset
# ----------------------------- Detailed Config -----------------------------
math_reader_cfg = dict(input_columns=['problem'], output_column='answer', train_split='test')
math_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
sub_sets = ['en-hard', 'zh-hard', 'en-easy', 'zh-easy']
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Evaluation configuration
olymmath_datasets = []
for sub_set in sub_sets:
math_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
round=[
dict(
role='HUMAN',
prompt = GRADER_TEMPLATE
),
]),
),
dataset_cfg=dict(
type=OlymMATHDataset,
path='RUC-AIBOX/OlymMATH',
reader_cfg=math_reader_cfg,
subset=sub_set,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
olymmath_datasets.append(
dict(
type=OlymMATHDataset,
abbr=f'olymmath_llmjudge_{sub_set}',
path='RUC-AIBOX/OlymMATH',
reader_cfg=math_reader_cfg,
infer_cfg=math_infer_cfg,
eval_cfg=math_eval_cfg,
subset=sub_set,
)
)