Unverified Commit 32f40a8f authored by Fengzhe Zhou, committed by GitHub

[Sync] Sync with internal codes 2023.01.08 (#777)

parent 8194199d
@@ -91,10 +91,32 @@ docs/zh_cn/_build/
# sft config ignore list
configs/sft_cfg/*B_*
configs/sft_cfg/7B/*
configs/sft_cfg/20B/*
configs/cky/
# in case llama is cloned into opencompass
llama/
-# path of turbomind's model after running `lmdeploy.serve.turbomind.deploy`
-turbomind/
+# in case ilagent is cloned into opencompass
+ilagent/
# ignore the config file for criticbench evaluation
configs/sft_cfg/criticbench_eval/*
# path of turbomind's model after running `lmdeploy.serve.turbomind.deploy`
turbomind/
# cibench output
*.db
*.pth
*.pt
*.onnx
*.gz
*.gz.*
*.png
*.txt
*.jpg
*.json
*.csv
*.npy
*.c
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import CIBenchDataset, CIBenchEvaluator
cibench_reader_cfg = dict(
input_columns=["questions"],
output_column="references",
train_split='test',
test_split='test')
cibench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template="""{questions}""",
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=AgentInferencer, infer_mode='every'),
)
libs = ['Pandas', 'Matplotlib', 'Opencv', 'SciPy', 'Seaborn', 'PyTorch']
cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")
cibench_datasets = [
dict(
abbr=f"cibench_generation_{lib}",
type=CIBenchDataset,
path=f"./data/cibench/{lib}",
reader_cfg=cibench_reader_cfg,
infer_cfg=cibench_infer_cfg,
eval_cfg=cibench_eval_cfg,
) for lib in libs
]
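# A minimal usage sketch (the module name below is illustrative, not a path
# this commit guarantees): dataset lists such as `cibench_datasets` are
# normally pulled into a top-level evaluation config via mmengine's
# read_base(), mirroring the collection configs later in this commit.
from mmengine.config import read_base

with read_base():
    from .cibench_gen import cibench_datasets  # hypothetical module name

datasets = [*cibench_datasets]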
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess
PROMPT_EN = {
"FEWSHOT_INSTRUCTION_CLOZE" : [
dict(role='HUMAN', prompt='Mark\'s basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What\'s the total number of points scored by both teams added together?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n mark_pointers_2 = 25 * 2\n mark_pointers_3 = 8 * 3\n mark_free_throws = 10 * 1\n mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n opponents_pointers_2 = mark_pointers_2 * 2\n opponents_pointers_3 = mark_pointers_3 / 2\n opponents_free_throws = mark_free_throws / 2\n opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n total_points_scored = mark_points_scored + opponents_points_scored\n result = total_points_scored\n return result'),
dict(role='SYSTEM', prompt='Response:210'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 210'),
dict(role='HUMAN', prompt='Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n marbles = 60\n num_increased_marbles = marbles * 2 / 5\n num_total_marbles = marbles + num_increased_marbles\n frisbees = marbles / 2\n num_increased_frisbees = frisbees * 2 / 5\n num_total_frisbees = frisbees + num_increased_frisbees\n deck_cards = frisbees - 20\n num_increased_deck_cards = deck_cards * 2 / 5\n num_total_deck_cards = deck_cards + num_increased_deck_cards\n num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n result = num_total\n return result'),
dict(role='SYSTEM', prompt='Response:140'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 140'),
dict(role='HUMAN', prompt='A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:def solution():\n num_fruits_per_first_three_basket = 9 + 15 + 14\n num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n num_apple_fourth_basket = 9 - 2\n num_orange_fourth_basket = 15 - 2\n num_banana_fourth_basket = 14 - 2\n num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n result = num_fruits_total\n return result"""),
dict(role='SYSTEM', prompt='Response:146'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 146'),
dict(role='HUMAN', prompt='{question}'),
],
"FEWSHOT_INSTRUCTION_CHOICE" : [
dict(role='HUMAN', prompt='Given point P(-1,4) lies on the graph of the inverse proportionality function $y=\\frac{{k}}{{x}}$ (k≠0), what is the value of k? A. $-\\frac{{1}}{{4}}$ B. $\\frac{{1}}{{4}}$ C. $4$ D. $-4$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import solve, symbols, Ne, Eq\ndef solution():\n k = symbols(\'k\')\n result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n return result'),
dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
dict(role='BOT', prompt='Thought: Substituting point P into the function yields the value of k as $-4$\nFinalAnswer: D'),
dict(role='HUMAN', prompt='The graph of the power function $y=f(x)$ passes through the point $(2, \\dfrac {{1}}{{4}})$, what is the value of $f(-3)$? A. $\\frac{{1}}{{9}}$ B. $\\frac{{1}}{{8}}$ C. $\\frac{{2}}{{9}}$ D. $\\frac{{1}}{{4}}$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import *\ndef solution():\n x, y, k = symbols(\'x y k\')\n eq1 = Eq(2**k, Rational(1, 4))\n k_value = solve(eq1, k)[0]\n y = x**k_value\n result = y.subs(x, -3)\n return result'),
dict(role='SYSTEM', prompt='Response:1/9'),
dict(role='BOT', prompt='Thought: The functional expression of the power function is $y=x^{{-2}}$. Substituting $x=-3$ yields $y=\\frac{{1}}{{9}}$\nFinalAnswer: A'),
dict(role='HUMAN', prompt='If $3 x-y=12$, what is the value of $\\frac{8^{x}}{2^{y}} ?$\nA. The value cannot be determined from the information given.\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result'),
dict(role='SYSTEM', prompt='Response:2**12'),
dict(role='BOT', prompt='Thought: The value of $\\frac{8^{x}}{2^{y}}$ is $2^{12}$\nFinalAnswer: B'),
dict(role='HUMAN', prompt='{question}'),
]
}
PROMPT_CN = {
"FEWSHOT_INSTRUCTION_CLOZE" : [
dict(role='HUMAN', prompt='Mark的篮球队得到25个2分球,8个3分球和10个罚球。他们的对手得到2分球的两倍,但3分球和罚球的一半。两队得分的总和是多少?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n mark_pointers_2 = 25 * 2\n mark_pointers_3 = 8 * 3\n mark_free_throws = 10 * 1\n mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n opponents_pointers_2 = mark_pointers_2 * 2\n opponents_pointers_3 = mark_pointers_3 / 2\n opponents_free_throws = mark_free_throws / 2\n opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n total_points_scored = mark_points_scored + opponents_points_scored\n result = total_points_scored\n return result'),
dict(role='SYSTEM', prompt='Response:210'),
dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 210'),
dict(role='HUMAN', prompt='Bella有两倍于飞盘的弹珠。她还比卡片多20个飞盘。如果她买每种物品多2/5,她会有多少总数的物品,如果她现在有60颗弹珠?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:def solution():\n marbles = 60\n num_increased_marbles = marbles * 2 / 5\n num_total_marbles = marbles + num_increased_marbles\n frisbees = marbles / 2\n num_increased_frisbees = frisbees * 2 / 5\n num_total_frisbees = frisbees + num_increased_frisbees\n deck_cards = frisbees - 20\n num_increased_deck_cards = deck_cards * 2 / 5\n num_total_deck_cards = deck_cards + num_increased_deck_cards\n num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n result = num_total\n return result'),
dict(role='SYSTEM', prompt='Response:140'),
dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 140'),
dict(role='HUMAN', prompt='一个有4个水果篮子,前三个篮子里有9个苹果、15个橙子和14个香蕉,第四个篮子里每种水果都少2个。总共有多少水果?'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:def solution():\n num_fruits_per_first_three_basket = 9 + 15 + 14\n num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n num_apple_fourth_basket = 9 - 2\n num_orange_fourth_basket = 15 - 2\n num_banana_fourth_basket = 14 - 2\n num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n result = num_fruits_total\n return result"""),
dict(role='SYSTEM', prompt='Response:146'),
dict(role='BOT', prompt='Thought: 根据回答,我得到了答案\nFinalAnswer: 146'),
dict(role='HUMAN', prompt='{question}'),
],
"FEWSHOT_INSTRUCTION_CHOICE" : [
dict(role='HUMAN', prompt='已知点P(-1,4)在反比例函数$y=\\frac{{k}}{{x}}$ (k≠0)的图象上,则k的值是____ A. $-\\frac{{1}}{{4}}$ B. $\\frac{{1}}{{4}}$ C. $4$ D. $-4$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import solve, symbols, Ne, Eq\ndef solution():\n k = symbols(\'k\')\n result = solve([Eq(4, k / (-1)), Ne(k, 0)], k, dict=True)\n return result'),
dict(role='SYSTEM', prompt='Response:{{Eq(k, -4)}}'),
dict(role='BOT', prompt='Thought: 将点 P 带入函数解出 k 的值为 $-4$\nFinalAnswer: D'),
dict(role='HUMAN', prompt='幂函数$ y=f(x) $的图象经过点$ (2, \\dfrac {{1}}{{4}}) $,则$ f(-3) $的值为____ A. $\\frac{{1}}{{9}}$ B. $\\frac{{1}}{{8}}$ C. $\\frac{{2}}{{9}}$ D. $\\frac{{1}}{{4}}$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import *\ndef solution():\n x, y, k = symbols(\'x y k\')\n eq1 = Eq(2**k, Rational(1, 4))\n k_value = solve(eq1, k)[0]\n y = x**k_value\n result = y.subs(x, -3)\n return result'),
dict(role='SYSTEM', prompt='Response:1/9'),
dict(role='BOT', prompt='Thought: 求出幂函数的函数表达式为 $y=x^{{-2}}$,代入 $x=-3$ 得到 $y=\\frac{{1}}{{9}}$\nFinalAnswer: A'),
dict(role='HUMAN', prompt='如果$3 x-y=12$,则$\\frac{8^{x}}{2^{y}}$的值是多少?\nA. 无法从给定的信息中确定值。\nB. $2^{12}$\nC. 4\nD. $8^{2}$'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:from sympy import symbols, Eq, solve\n\ndef solution():\n    x, y = symbols(\'x y\')\n    equation = Eq(3*x - y, 12)\n\n    y_in_terms_of_x = solve(equation, y)[0]\n    expression = 8**x / 2**y_in_terms_of_x\n    result = expression.simplify()\n    return result'),
dict(role='SYSTEM', prompt='Response:2**12'),
dict(role='BOT', prompt='Thought: $\\frac{8^{x}}{2^{y}}$的值是$2^{12}$\nFinalAnswer: B'),
dict(role='HUMAN', prompt='{question}'),
]
}
mathbench_sets = {
'college': ['single_choice_cn', 'cloze_en'],
'high': ['single_choice_cn', 'single_choice_en'],
'middle': ['single_choice_cn'],
'primary': ['cloze_cn']
}
# Use circular evaluation or not
with_circular_eval = True
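# CircularEvaluator asks each choice question multiple times with the options
# rotated (ABCD, BCDA, CDAB, DABC) and credits the question only when the
# model answers the rotations consistently, a stricter measure than plain
# one-shot accuracy.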
mathbench_agent_datasets = []
for _split in list(mathbench_sets.keys()):
for _name in mathbench_sets[_split]:
prompt_example = PROMPT_CN if '_cn' in _name else PROMPT_EN
mathbench_infer_cfg = dict(
prompt_template=dict(type=PromptTemplate,
template=dict(
round = prompt_example["FEWSHOT_INSTRUCTION_CLOZE"] if 'cloze' in _name else prompt_example["FEWSHOT_INSTRUCTION_CHOICE"])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=AgentInferencer)
)
mathbench_eval_cfg = dict(
evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))
mathbench_agent_datasets.append(
dict(
abbr="mathbench-" + _split + '-' + _name + '-agent',
type=MathBenchDataset,
path=f"./data/mathbench/{_split}",
name=_name,
with_circular=with_circular_eval,
reader_cfg=dict(
input_columns=["question"],
output_column="answer"
),
infer_cfg=mathbench_infer_cfg,
eval_cfg=mathbench_eval_cfg,
))
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
cloze_prompts = {
"cloze_arith_en": [
dict(role='HUMAN', prompt='Q: Calculate (341/11)/(9/(-6)*(-2)/3).'),
dict(role='BOT', prompt='A: First, (9/(-6)*(-2)/3) can be simplified by : 9/(-6) = -1.5, -1.5 * (-2) = 3, 3 / 3 = 1. So, (9/(-6)*(-2)/3) is equal to 1. Now, we have `(341/11)/1` equals `341/11`. Finally, calculate `341/11 = 31`. The answer is 31.\n'),
dict(role='HUMAN', prompt='Q: In base 14, what is 5 - 638d8d?'),
dict(role='BOT', prompt='A: 5 - 638d8d = -638d88. The answer is -638d88.\n'),
dict(role='HUMAN', prompt='Q: What is -491354 times -0.34?'),
dict(role='BOT', prompt='A: The product of -491354 and -0.34 is 167060.36. The answer is 167060.36.\n'),
dict(role='HUMAN', prompt='Q: What is the value of (-55)/(6930/(-382)) + (0 - 3)?.'),
dict(role='BOT', prompt='A: First, (-55)/(6930/(-382)) = (-55)/(-(6930/382)) = 55*382/6930 = 21010/6930 = 2101/693. Then, 2101/693 + (0 - 3) = 2101/693 - 3 = 2101/693 - 3*693/693 = (2101-2079)/693 = 22/693 = 2/63. The answer is 2/63.\n'),
dict(role='HUMAN', prompt='Q: {question}'),
dict(role='BOT', prompt='A: {answer}\n'),
]
}
mathbench_sets = {
'arithmetic': ['cloze_arith_en'],
}
mathbench_datasets = []
for _split in list(mathbench_sets.keys()):
for _name in mathbench_sets[_split]:
mathbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=cloze_prompts[_name],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
mathbench_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_postprocessor=dict(type=mathbench_postprocess, name=_name))
mathbench_datasets.append(
dict(
type=MathBenchDataset,
path=f"./data/mathbench/{_split}",
name=_name,
with_circular=False,
abbr="mathbench-arithmetic" + _split + '-' + _name,
reader_cfg=dict(
input_columns=["question"],
output_column="answer"
),
infer_cfg=mathbench_infer_cfg,
eval_cfg=mathbench_eval_cfg,
))
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess
single_choice_prompts = {
"single_choice_cn_with_reasoning": "以下是一道关于数学的单项选择题,请你一步一步推理并得到最终的答案选项。回答格式为如下:\n答案选项:A、B、C、D中你认为正确的一个选项\n计算过程:根据题目得到选项答案的一步步过程\n请严格按照上面的格式回答问题,下面是你要回答的题目:\n{question}\n答案选项:",
"single_choice_cn": "以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n答案选项:",
"single_choice_en_with_reasoning": "Here is a multiple-choice question about mathematics. Please provide the final answer option by step-by-step reasoning. Please answer in the following format:\nAnswer option: A, B, C, or D (the option you believe is correct)\nCalculation process: Step-by-step process to derive the answer option based on the question\nPlease strictly follow the above format to answer the question. Here is the question you need to answer:\n{question}\nAnswer option:",
"single_choice_en": "Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nAnswer option:",
}
cloze_prompts = {
"cloze_cn": [
dict(role='HUMAN', prompt='Q: 林中有15棵树。林务工人员今天将在林中种植树木。完成后,将有21棵树。林务工人员今天种植了多少棵树?'),
dict(role='BOT', prompt='A: 我们从15棵树开始。后来有21棵树。差值必定是他们种植的树木数量。所以,他们必须种植了21 - 15 = 6棵树。答案是 6\n'),
dict(role='HUMAN', prompt='Q: 如果停车场有3辆车,又有2辆车进来,停车场里有多少辆车?'),
dict(role='BOT', prompt='A: 停车场已经有3辆车。又进来了2辆车。现在有3 + 2 = 5辆车。答案是 5\n'),
dict(role='HUMAN', prompt='Q: Leah有32块巧克力,她的妹妹有42块。如果他们吃了35块,他们总共剩下多少块?'),
dict(role='BOT', prompt='A: Leah有32块巧克力,Leah的妹妹有42块。这意味着原本有32 + 42 = 74块巧克力。被吃掉了35块。所以他们总共还剩下74 - 35 = 39块巧克力。答案是 39\n'),
dict(role='HUMAN', prompt='Q: Jason有20个棒棒糖。他给Denny一些棒棒糖。现在Jason只剩下12个棒棒糖。Jason给Denny多少个棒棒糖?'),
dict(role='BOT', prompt='A: Jason有20个棒棒糖。因为他现在只剩下12个,所以他必须把剩下的都给了Denny。他给Denny的棒棒糖数量必定是20 - 12 = 8个。答案是 8\n'),
dict(role='HUMAN', prompt='Q: Shawn有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
dict(role='BOT', prompt='A: 他有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9\n'),
dict(role='HUMAN', prompt='Q: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
dict(role='BOT', prompt='A: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29\n'),
dict(role='HUMAN', prompt='Q: Michael有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
dict(role='BOT', prompt='A: Michael一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33\n'),
dict(role='HUMAN', prompt='Q: Olivia有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
dict(role='BOT', prompt='A: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8\n'),
dict(role='HUMAN', prompt='Q: {question}'),
dict(role='BOT', prompt='A: {answer}'),
],
"cloze_en": [
dict(role='HUMAN', prompt='Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?'),
dict(role='BOT', prompt='A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.\n'),
dict(role='HUMAN', prompt='Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?'),
dict(role='BOT', prompt='A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n'),
dict(role='HUMAN', prompt='Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?'),
dict(role='BOT', prompt="A: Leah had 32 chocolates and Leah's sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.\n"),
dict(role='HUMAN', prompt='Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?'),
dict(role='BOT', prompt='A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.\n'),
dict(role='HUMAN', prompt='Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
dict(role='BOT', prompt='A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.\n'),
dict(role='HUMAN', prompt='Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
dict(role='BOT', prompt='A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.\n'),
dict(role='HUMAN', prompt='Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
dict(role='BOT', prompt='A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.\n'),
dict(role='HUMAN', prompt='Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
dict(role='BOT', prompt='A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.\n'),
dict(role='HUMAN', prompt='Q: {question}'),
dict(role='BOT', prompt='A: {answer}\n'),
]}
mathbench_sets = {
'college': ['single_choice_cn', 'cloze_en'],
'high': ['single_choice_cn', 'single_choice_en'],
'middle': ['single_choice_cn'],
'primary': ['cloze_cn']
}
# If True, prompt the model to generate a step-by-step reasoning path; otherwise ask for the final answer only
with_reasoning = False
# Use circular evaluation or not
with_circular_eval = True
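# Note: with_reasoning only switches the single_choice prompts to their
# "_with_reasoning" variants in the loop below; the cloze prompts always
# carry worked few-shot solutions.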
mathbench_datasets = []
for _split in list(mathbench_sets.keys()):
for _name in mathbench_sets[_split]:
mathbench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role="HUMAN",
prompt=single_choice_prompts[_name + "_with_reasoning"] if with_reasoning else single_choice_prompts[_name],
),
dict(role="BOT", prompt="{answer}")] if 'choice' in _name else cloze_prompts[_name],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
mathbench_eval_cfg = dict(
evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))
mathbench_datasets.append(
dict(
abbr="mathbench-" + _split + '-' + _name,
type=MathBenchDataset,
path=f"./data/mathbench/{_split}",
name=_name,
with_circular=with_circular_eval,
reader_cfg=dict(
input_columns=["question"],
output_column="answer"
),
infer_cfg=mathbench_infer_cfg,
eval_cfg=mathbench_eval_cfg,
))
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.utils.text_postprocessors import first_capital_postprocess
from opencompass.datasets.reasonbench import ReasonBenchDataset
reasonbench_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_postprocessor=dict(type=first_capital_postprocess)
)
reader_cfgs = []
for i in range(2, 5):
choices = ["A", "B", "C", "D"][:i]
reader_cfgs.append(dict(
input_columns=["prompt_ppl"],
output_column="label_ppl")
)
infer_cfg=dict(
ice_template=dict(
type=PromptTemplate,
template=dict(
begin="</E>",
round=[
dict(
role="HUMAN",
prompt="</E>{prompt_ppl}"
),
dict(role="BOT", prompt="Answer: {label_ppl}"),
]),
ice_token="</E>",
),
retriever=dict(type=FixKRetriever, fix_id_list=[]),
inferencer=dict(type=GenInferencer)
)
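# Note: with an empty fix_id_list, FixKRetriever supplies no in-context
# examples, so the "</E>" ice_token expands to nothing and the prompt is
# effectively zero-shot.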
CausalReasoningDataset = [
dict(
abbr="reasonbench-causal",
type=ReasonBenchDataset,
path="data/reasonbench/causal.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
CommonsenseReasoningDataset = [
dict(
abbr="reasonbench-commonsense",
type=ReasonBenchDataset,
path="data/reasonbench/commonsense.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
AbductiveReasoningDataset = [
dict(
abbr="reasonbench-abductive",
type=ReasonBenchDataset,
path="data/reasonbench/abductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
DeductiveReasoningDataset = [
dict(
abbr="reasonbench-deductive",
type=ReasonBenchDataset,
path="data/reasonbench/deductive.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
InductiveReasoningDataset = [
dict(
abbr="reasonbench-inductive",
type=ReasonBenchDataset,
path="data/reasonbench/inductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
SymbolicReasoningDataset = [
dict(
abbr="reasonbench-symbolic",
type=ReasonBenchDataset,
path="data/reasonbench/symbolic.jsonl",
reader_cfg=reader_cfgs[2],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_CommonsenseReasoningDataset = [
dict(
abbr="reasonbench-cleva_commonsense",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_commonsense.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_DeductiveReasoningDataset = [
dict(
abbr="reasonbench-cleva_deductive",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_deductive.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_InductiveReasoningDataset = [
dict(
abbr="reasonbench-cleva_inductive",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_inductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfg,
eval_cfg=reasonbench_eval_cfg),
]
reasonbench_datasets = \
CLEVA_CommonsenseReasoningDataset + \
CLEVA_DeductiveReasoningDataset + \
CLEVA_InductiveReasoningDataset + \
CausalReasoningDataset + \
CommonsenseReasoningDataset + \
AbductiveReasoningDataset + \
DeductiveReasoningDataset + \
InductiveReasoningDataset + \
SymbolicReasoningDataset
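# The generation-based setup above extracts the first capital letter from the
# model's reply; the perplexity-based variant that follows instead scores
# every candidate option and predicts the most likely one.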
from mmengine.config import read_base
with read_base():
from .reasonbench_ppl_b4a005 import reasonbench_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets.reasonbench import ReasonBenchDataset
reasonbench_eval_cfg = dict(
evaluator=dict(type=AccEvaluator),
pred_role="BOT",
)
reader_cfgs, infer_cfgs = [], []
for i in range(2, 5):
choices = ["A", "B", "C", "D"][:i]
reader_cfgs.append(dict(
input_columns=["prompt_ppl"] + choices + ["choices"],
output_column="label")
)
infer_cfgs.append(dict(
prompt_template=dict(
type=PromptTemplate,
template={
str(id):
dict(
round=[
dict(role="HUMAN", prompt="{prompt_ppl}Answer:"),
dict(role="BOT", prompt=f"{choice}")
], )
for id, choice in enumerate(choices)
}),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=PPLInferencer)
))
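# For example, with choices ["A", "B"] the loop above builds a template of
# the form {"0": HUMAN "{prompt_ppl}Answer:" + BOT "A",
#           "1": HUMAN "{prompt_ppl}Answer:" + BOT "B"};
# PPLInferencer then scores each filled-in candidate by perplexity and
# predicts the label whose completion is most likely.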
CausalReasoningDataset = [
dict(
abbr="reasonbench-causal",
type=ReasonBenchDataset,
path="data/reasonbench/causal.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfgs[0],
eval_cfg=reasonbench_eval_cfg),
]
CommonsenseReasoningDataset = [
dict(
abbr="reasonbench-commonsense",
type=ReasonBenchDataset,
path="data/reasonbench/commonsense.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfgs[1],
eval_cfg=reasonbench_eval_cfg),
]
AbductiveReasoningDataset = [
dict(
abbr="reasonbench-abductive",
type=ReasonBenchDataset,
path="data/reasonbench/abductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfgs[0],
eval_cfg=reasonbench_eval_cfg),
]
DeductiveReasoningDataset = [
dict(
abbr="reasonbench-deductive",
type=ReasonBenchDataset,
path="data/reasonbench/deductive.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfgs[1],
eval_cfg=reasonbench_eval_cfg),
]
InductiveReasoningDataset = [
dict(
abbr="reasonbench-inductive",
type=ReasonBenchDataset,
path="data/reasonbench/inductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfgs[0],
eval_cfg=reasonbench_eval_cfg),
]
SymbolicReasoningDataset = [
dict(
abbr="reasonbench-symbolic",
type=ReasonBenchDataset,
path="data/reasonbench/symbolic.jsonl",
reader_cfg=reader_cfgs[2],
infer_cfg=infer_cfgs[2],
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_CommonsenseReasoningDataset = [
dict(
abbr="reasonbench-cleva_commonsense",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_commonsense.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfgs[1],
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_DeductiveReasoningDataset = [
dict(
abbr="reasonbench-cleva_deductive",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_deductive.jsonl",
reader_cfg=reader_cfgs[1],
infer_cfg=infer_cfgs[1],
eval_cfg=reasonbench_eval_cfg),
]
CLEVA_InductiveReasoningDataset = [
dict(
abbr="reasonbench-cleva_inductive",
type=ReasonBenchDataset,
path="data/reasonbench/cleva_inductive.jsonl",
reader_cfg=reader_cfgs[0],
infer_cfg=infer_cfgs[0],
eval_cfg=reasonbench_eval_cfg),
]
reasonbench_datasets = \
CLEVA_CommonsenseReasoningDataset + \
CLEVA_DeductiveReasoningDataset + \
CLEVA_InductiveReasoningDataset + \
CausalReasoningDataset + \
CommonsenseReasoningDataset + \
AbductiveReasoningDataset + \
DeductiveReasoningDataset + \
InductiveReasoningDataset + \
SymbolicReasoningDataset
@@ -47,7 +47,7 @@ with read_base():
from ..piqa.piqa_ppl_1cf9f0 import piqa_datasets
from ..siqa.siqa_ppl_ced5f6 import siqa_datasets
from ..strategyqa.strategyqa_gen_1180a7 import strategyqa_datasets
-from ..winogrande.winogrande_ppl_55a66e import winogrande_datasets
+from ..winogrande.winogrande_ll_c5cf57 import winogrande_datasets
from ..obqa.obqa_ppl_c7c154 import obqa_datasets
from ..nq.nq_gen_c788f6 import nq_datasets
from ..triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
...
@@ -47,7 +47,7 @@ with read_base():
from ..piqa.piqa_ppl_0cfff2 import piqa_datasets
from ..siqa.siqa_ppl_e8d8c5 import siqa_datasets
from ..strategyqa.strategyqa_gen_1180a7 import strategyqa_datasets
-from ..winogrande.winogrande_ppl_55a66e import winogrande_datasets
+from ..winogrande.winogrande_ll_c5cf57 import winogrande_datasets
from ..obqa.obqa_ppl_6aac9e import obqa_datasets
from ..nq.nq_gen_0356ec import nq_datasets
from ..triviaqa.triviaqa_gen_0356ec import triviaqa_datasets
...
@@ -30,7 +30,7 @@ with read_base():
from ..summedits.summedits_ppl_1fbeb6 import summedits_datasets
from ..hellaswag.hellaswag_ppl_47bff9 import hellaswag_datasets
from ..piqa.piqa_ppl_1cf9f0 import piqa_datasets
-from ..winogrande.winogrande_ppl_55a66e import winogrande_datasets
+from ..winogrande.winogrande_ll_c5cf57 import winogrande_datasets
from ..obqa.obqa_ppl_c7c154 import obqa_datasets
from ..nq.nq_gen_c788f6 import nq_datasets
from ..triviaqa.triviaqa_gen_2121ce import triviaqa_datasets
...
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import (
GSM8KDataset,
gsm8k_postprocess,
gsm8k_dataset_postprocess,
Gsm8kAgentEvaluator,
)
gsm8k_reader_cfg = dict(input_columns=["question"], output_column="answer")
gsm8k_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
# ################################### NEW SHOT ###################################
dict(role='HUMAN', prompt='Mark\'s basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What\'s the total number of points scored by both teams added together?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n mark_pointers_2 = 25 * 2\n mark_pointers_3 = 8 * 3\n mark_free_throws = 10 * 1\n mark_points_scored = mark_pointers_2 + mark_pointers_3 + mark_free_throws\n opponents_pointers_2 = mark_pointers_2 * 2\n opponents_pointers_3 = mark_pointers_3 / 2\n opponents_free_throws = mark_free_throws / 2\n opponents_points_scored = opponents_pointers_2 + opponents_pointers_3 + opponents_free_throws\n total_points_scored = mark_points_scored + opponents_points_scored\n result = total_points_scored\n return result\n```'),
dict(role='SYSTEM', prompt='Response:210'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 210'),
dict(role='HUMAN', prompt='Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?'),
dict(role='BOT', prompt='Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n marbles = 60\n num_increased_marbles = marbles * 2 / 5\n num_total_marbles = marbles + num_increased_marbles\n frisbees = marbles / 2\n num_increased_frisbees = frisbees * 2 / 5\n num_total_frisbees = frisbees + num_increased_frisbees\n deck_cards = frisbees - 20\n num_increased_deck_cards = deck_cards * 2 / 5\n num_total_deck_cards = deck_cards + num_increased_deck_cards\n num_total = num_total_marbles + num_total_frisbees + num_total_deck_cards\n result = num_total\n return result\n```'),
dict(role='SYSTEM', prompt='Response:140'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 140'),
dict(role='HUMAN', prompt='A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:```python\ndef solution():\n num_fruits_per_first_three_basket = 9 + 15 + 14\n num_fruits_first_three_basket = num_fruits_per_first_three_basket * 3\n num_apple_fourth_basket = 9 - 2\n num_orange_fourth_basket = 15 - 2\n num_banana_fourth_basket = 14 - 2\n num_fruits_fourth_basket = num_apple_fourth_basket + num_orange_fourth_basket + num_banana_fourth_basket\n num_fruits_total = num_fruits_first_three_basket + num_fruits_fourth_basket\n result = num_fruits_total\n return result\n```"""),
dict(role='SYSTEM', prompt='Response:146'),
dict(role='BOT', prompt='Thought: According to the response, I got the answer\nFinalAnswer: 146'),
dict(role='HUMAN', prompt='{question}'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=AgentInferencer),
)
gsm8k_eval_cfg = dict(
evaluator=dict(type=Gsm8kAgentEvaluator),
pred_postprocessor=dict(type=gsm8k_postprocess),
dataset_postprocessor=dict(type=gsm8k_dataset_postprocess),
)
gsm8k_datasets = [
dict(
abbr='gsm8k-agent',
type=GSM8KDataset,
path='./data/gsm8k',
reader_cfg=gsm8k_reader_cfg,
infer_cfg=gsm8k_infer_cfg,
eval_cfg=gsm8k_eval_cfg,
)
]
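# A note on the agent setup (inferred from the few-shot format above, not a
# documented contract): AgentInferencer drives a ReAct-style loop in which
# the model emits Tool:PythonInterpreter actions, the harness executes the
# code and feeds back a Response: observation, and Gsm8kAgentEvaluator
# checks the number after the FinalAnswer: marker.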
@@ -37,8 +37,8 @@ humanevalx_eval_cfg_dict = {
        language=lang,
        ip_address=
        "localhost",  # replace to your code_eval_server ip_address, port
-       port=5000
-    ),  # refer to https://github.com/Ezra-Yu/code-evaluator to launch a server
+       port=5001
+    ),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
        pred_role='BOT')
    for lang in ['python', 'cpp', 'go', 'java', 'js'
                 ]  # do not support rust now
...
@@ -18,8 +18,9 @@ humanevalx_eval_cfg_dict = {
    evaluator=dict(
        type=HumanevalXEvaluator,
        language=lang,
-       ip_address="localhost",  # replace to your code_eval_server ip_address, port
-       port=5000),  # refer to https://github.com/Ezra-Yu/code-evaluator to launch a server
+       ip_address=
+       "localhost",  # replace to your code_eval_server ip_address, port
+       port=5001),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
        pred_role='BOT')
    for lang in ['python', 'cpp', 'go', 'java', 'js']  # do not support rust now
}
...
from mmengine.config import read_base
with read_base():
-    from .reasonbench_gen_d15233 import reasonbench_datasets
+    from .hungarian_exam_gen_8a1435 import hungarianmath_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import HungarianExamMathDataset
hungarianmath_reader_cfg = dict(input_columns=['question'], output_column=None)
template = """Problem:
Find the domain of the expression $\frac{\sqrt{x-2}}{\sqrt{5-x}}$.
Solution:
To determine the domain, we must ensure that:
1. The expressions inside each square root are non-negative.
2. The denominator is not equal to zero.
For the numerator, $x-2 \ge 0$ gives $x \ge 2$.
For the denominator, $5-x \ge 0$ gives $x \le 5$. And since the denominator cannot be zero, $5-x > 0$ which further narrows it to $x < 5$.
Combining these results, the domain of the expression is $[2,5)$.
Final Answer: The final answer is $[2,5)$.
Problem:
If $\det \mathbf{A} = 2$ and $\det \mathbf{B} = 12$, then find $\det (\mathbf{A} \mathbf{B})$.
Solution:
Using the property of determinants, we can say that:
$\det (\mathbf{A} \mathbf{B}) = (\det \mathbf{A})(\det \mathbf{B})$.
Plugging in the given values:
$\det (\mathbf{A} \mathbf{B}) = 2 \times 12 = 24$.
Final Answer: The final answer is $24$.
Problem:
Terrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight?
Solution:
First, calculate the total weight Terrell lifts with the 20-pound weights:
$2 \times 12 \times 20 = 480$ pounds.
If he uses 15-pound weights and lifts them $n$ times:
$2 \times 15 \times n = 30n$ pounds.
To find $n$, set these two equal:
\begin{align*}
30n &= 480 \\
n &= \frac{480}{30} \\
n &= 16
\end{align*}
Final Answer: The final answer is $16$.
Problem:
If the system of equations
\begin{align*}
6x-4y &= a, \\
6y-9x &= b.
\end{align*}
has a solution $(x, y)$ where $x$ and $y$ are both nonzero, find $\frac{a}{b}$, assuming $b$ is nonzero.
Solution:
Multiply the first equation by $-\frac{3}{2}$ to obtain:
$6y-9x = -\frac{3}{2}a$.
Since we also know that $6y-9x = b$, equating them gives:
$-\frac{3}{2}a = b$ which implies $\frac{a}{b} = -\frac{2}{3}$.
Final Answer: The final answer is $-\frac{2}{3}$."""
hungarianmath_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt=template+"\n\nProblem:\n{question}\n\nSolution:\n"),
],
)),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=1024))
# Attention: this math dataset requires human evaluation of the generated answers, so the AccEvaluator here is only a placeholder.
hungarianmath_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
hungarianmath_datasets = [
dict(
abbr='HungarianExamMath',
type=HungarianExamMathDataset,
path='./data/HungarianExamMath/test.csv',
reader_cfg=hungarianmath_reader_cfg,
infer_cfg=hungarianmath_infer_cfg,
eval_cfg=hungarianmath_eval_cfg)
]
from mmengine.config import read_base
with read_base():
-    from .longbench_multi_news_gen_f6e3fb import LongBench_multi_news_datasets  # noqa: F401, F403
+    from .longbench_multi_news_gen_6f9da9 import LongBench_multi_news_datasets  # noqa: F401, F403
@@ -15,7 +15,7 @@ LongBench_multi_news_infer_cfg = dict(
        type=PromptTemplate,
        template=dict(
            round=[
-               dict(role='HUMAN', prompt='You are given several news passages. Write a one-page summary of all news. \n\nNews:\n{context}\n\nNow, write a one-page summary of all the news.\n\nSummary:'),
+               dict(role='HUMAN', prompt='You are given several news passages. Write a one-page summary of all news. \n\nNews:\n{context}\n\nNow, write a one-page summary of all the news.\n\nSummary:\n'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512)
...
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import (
MATHDataset, MATHAgentEvaluator, math_postprocess_v2
)
# Uses the PAL format, but it does not perform well.
math_reader_cfg = dict(input_columns=['problem'], output_column='solution')
math_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
# ################################### NEW SHOT ###################################
dict(role='HUMAN', prompt='Find the coefficient of $x^3$ when $3(x^2 - x^3+x) +3(x +2x^3- 3x^2 + 3x^5+x^3) -5(1+x-4x^3 - x^2)$ is simplified.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter
Tool Input:```python
from sympy import symbols, simplify
def solution():
x = symbols('x')
expr = 3*(x**2 - x**3 + x) + 3*(x + 2*x**3 - 3*x**2 + 3*x**5 + x**3) - 5*(1 + x - 4*x**3 - x**2)
simplified_expr = simplify(expr)
x3_coefficient = simplified_expr.as_coefficients_dict()[x**3]
result = x3_coefficient
return result
```"""),
dict(role='SYSTEM', prompt='Response:26'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $26$. I hope it is correct.'),
dict(role='HUMAN', prompt='The surface area of a sphere with radius $r$ is $4\pi r^2$. Including the area of its circular base, what is the total surface area of a hemisphere with radius 6 cm? Express your answer in terms of $\pi$.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter
Tool Input:```python
import math
def solution():
radius = 6
# Surface area of the hemisphere
hemisphere_area = 2 * math.pi * radius**2
# Area of the circular base
base_area = math.pi * radius**2
# Total surface area
total_surface_area = hemisphere_area + base_area
# Formatting the result in LaTeX
result = r'{}\pi'.format(total_surface_area / math.pi)
return result
```"""),
dict(role='SYSTEM', prompt='Response:108.0\\pi'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $108.0\pi$. I hope it is correct.'),
dict(role='HUMAN', prompt='Monica tosses a fair 6-sided die. If the roll is a prime number, then she wins that amount of dollars (so that, for example, if she rolls 3, then she wins 3 dollars). If the roll is composite, she wins nothing. Otherwise, she loses 3 dollars. What is the expected value of her winnings on one die toss? Express your answer as a dollar value to the nearest cent.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter
Tool Input:```python
def solution():
# Probabilities of each outcome
prime_prob = 1 / 6
composite_prob = 1 / 3
otherwise_prob = 1 / 6
# Expected value of each outcome
prime_expected_value = (2 * prime_prob) + (3 * prime_prob) + (5 * prime_prob)
composite_expected_value = 0 * composite_prob
otherwise_expected_value = -3 * otherwise_prob
# Total expected value
total_expected_value = prime_expected_value + composite_expected_value + otherwise_expected_value
# Dollar value to the nearest cent
result = "{:.2f}".format(total_expected_value)
return result
```"""),
dict(role='SYSTEM', prompt='Response:1.17'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $1.17$. I hope it is correct.'),
dict(role='HUMAN', prompt='{problem}'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=AgentInferencer),
)
math_eval_cfg = dict(
evaluator=dict(
type=MATHAgentEvaluator,
version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2))
math_datasets = [
dict(
abbr='math-agent',
type=MATHDataset,
path='./data/math/math.json',
reader_cfg=math_reader_cfg,
infer_cfg=math_infer_cfg,
eval_cfg=math_eval_cfg,
)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import (
MATHDataset, MATHAgentEvaluator, math_postprocess_v2
)
# Uses the PAL format, but it does not perform well.
math_reader_cfg = dict(input_columns=['problem'], output_column='solution')
math_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
# ################################### NEW SHOT ###################################
dict(role='HUMAN', prompt='Find the coefficient of $x^3$ when $3(x^2 - x^3+x) +3(x +2x^3- 3x^2 + 3x^5+x^3) -5(1+x-4x^3 - x^2)$ is simplified.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:from sympy import symbols, simplify
def solution():
x = symbols('x')
expr = 3*(x**2 - x**3 + x) + 3*(x + 2*x**3 - 3*x**2 + 3*x**5 + x**3) - 5*(1 + x - 4*x**3 - x**2)
simplified_expr = simplify(expr)
x3_coefficient = simplified_expr.as_coefficients_dict()[x**3]
result = x3_coefficient
return result"""),
dict(role='SYSTEM', prompt='Response:26'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $26$. I hope it is correct.'),
dict(role='HUMAN', prompt='The surface area of a sphere with radius $r$ is $4\pi r^2$. Including the area of its circular base, what is the total surface area of a hemisphere with radius 6 cm? Express your answer in terms of $\pi$.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:import math
def solution():
radius = 6
# Surface area of the hemisphere
hemisphere_area = 2 * math.pi * radius**2
# Area of the circular base
base_area = math.pi * radius**2
# Total surface area
total_surface_area = hemisphere_area + base_area
# Formatting the result in LaTeX
result = r'{}\pi'.format(total_surface_area / math.pi)
return result"""),
dict(role='SYSTEM', prompt='Response:108.0\\pi'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $108.0\pi$. I hope it is correct.'),
dict(role='HUMAN', prompt='Monica tosses a fair 6-sided die. If the roll is a prime number, then she wins that amount of dollars (so that, for example, if she rolls 3, then she wins 3 dollars). If the roll is composite, she wins nothing. Otherwise, she loses 3 dollars. What is the expected value of her winnings on one die toss? Express your answer as a dollar value to the nearest cent.'),
dict(role='BOT', prompt="""Tool:PythonInterpreter\nTool Input:def solution():
# Probabilities of each outcome
prime_prob = 1 / 6
composite_prob = 1 / 3
otherwise_prob = 1 / 6
# Expected value of each outcome
prime_expected_value = (2 * prime_prob) + (3 * prime_prob) + (5 * prime_prob)
composite_expected_value = 0 * composite_prob
otherwise_expected_value = -3 * otherwise_prob
# Total expected value
total_expected_value = prime_expected_value + composite_expected_value + otherwise_expected_value
# Dollar value to the nearest cent
result = "{:.2f}".format(total_expected_value)
return result"""),
dict(role='SYSTEM', prompt='Response:1.17'),
dict(role='BOT', prompt='FinalAnswer: The final answer is $1.17$. I hope it is correct.'),
dict(role='HUMAN', prompt='{problem}'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=AgentInferencer),
)
math_eval_cfg = dict(
evaluator=dict(
type=MATHAgentEvaluator,
version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2))
math_datasets = [
dict(
abbr='math-agent',
type=MATHDataset,
path='./data/math/math.json',
reader_cfg=math_reader_cfg,
infer_cfg=math_infer_cfg,
eval_cfg=math_eval_cfg,
)
]