Unverified Commit e78857ac authored by Hubert, committed by GitHub

[Sync] minor test (#683)

parent dd4318f6
@@ -11,6 +11,7 @@ configs/eval_debug*.py
 configs/viz_*.py
 data
 work_dirs
+models
 configs/internal/
 # Byte-compiled / optimized / DLL files
 __pycache__/
......
 from mmengine.config import read_base
 with read_base():
-    from .CIBench_gen_eb42f9 import ci_datasets  # noqa: F401, F403
+    from .CIBench_gen_8ab0dc import ci_datasets  # noqa: F401, F403
@@ -16,28 +16,20 @@ cibench_infer_cfg = dict(
         template="""{questions}""",
     ),
     retriever=dict(type=ZeroRetriever),
-    inferencer=dict(type=AgentInferencer),
+    inferencer=dict(type=AgentInferencer, infer_mode='every'),
 )

 libs = ['Pandas', 'Matplotlib', 'Opencv', 'SciPy', 'Seaborn', 'PyTorch']
-cibench_eval_cfg = {
-    lib: dict(
-        evaluator=dict(
-            type=CIBenchEvaluator,
-            output_dir=f'output_data/cibench/{lib}'),
-        pred_role="BOT",
-    )
-    for lib in libs
-}
+cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")

 cibench_datasets = [
     dict(
-        abbr=f"cibench_{lib}",
+        abbr=f"cibench_generation_{lib}",
         type=CIBenchDataset,
         path=f"./data/cibench/{lib}",
         reader_cfg=cibench_reader_cfg,
         infer_cfg=cibench_infer_cfg,
-        eval_cfg=cibench_eval_cfg[lib],
+        eval_cfg=cibench_eval_cfg,
     ) for lib in libs
 ]
@@ -95,7 +95,7 @@ mathbench_sets = {
 # Use circular evaluation or not
 with_circular_eval = True

-mathbench_code_datasets = []
+mathbench_agent_datasets = []

 for _split in list(mathbench_sets.keys()):
     for _name in mathbench_sets[_split]:
@@ -112,13 +112,13 @@ for _split in list(mathbench_sets.keys()):
             evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
             pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

-        mathbench_code_datasets.append(
+        mathbench_agent_datasets.append(
             dict(
+                abbr="mathbench-" + _split + '-' + _name + '-agent',
                 type=MathBenchDataset,
                 path=f"./data/mathbench/{_split}",
                 name=_name,
                 with_circular=with_circular_eval,
-                abbr="mathbench-interpreter-" + _split + '-' + _name,
                 reader_cfg=dict(
                     input_columns=["question"],
                     output_column="answer"
......
@@ -16,7 +16,7 @@ cloze_prompts ={
         dict(role='BOT', prompt='A: First, (-55)/(6930/(-382)) = (-55)/(-(6930/382)) = 55*382/6930 = 21010/6930 = 2101/693. Then, 2101/693 + (0 - 3) = 2101/693 - 3 = 2101/693 - 3*693/693 = (2101-2079)/693 = 22/693 = 2/63. The answer is 2/63.\n'),
         dict(role='HUMAN', prompt='Q: {question}'),
         dict(role='BOT', prompt='A: {answer}\n'),
     ]
 }

 mathbench_sets = {
......
@@ -94,11 +94,11 @@ for _split in list(mathbench_sets.keys()):
         mathbench_datasets.append(
             dict(
+                abbr="mathbench-" + _split + '-' + _name,
                 type=MathBenchDataset,
                 path=f"./data/mathbench/{_split}",
                 name=_name,
                 with_circular=with_circular_eval,
-                abbr="mathbench-" + _split + '-' + _name,
                 reader_cfg=dict(
                     input_columns=["question"],
                     output_column="answer"
......
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (DS1000Dataset, ds1000_completion_postprocess,
                                  ds1000_matplotlib_postprocess,
                                  DS1000Evaluator)

ds1000_reader_cfg = dict(
    input_columns=["prompt"],
    output_column="test_column",
    train_split='test',
    test_split='test')

ds1000_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="{prompt}",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

ds1000_eval_cfg = dict(
    evaluator=dict(type=DS1000Evaluator),
    pred_role="BOT",
    pred_postprocessor=dict(type=ds1000_completion_postprocess),
)

# The DS-1000 dataset can be downloaded from
# https://github.com/HKUNLP/DS-1000/blob/main/ds1000_data.zip
ds1000_datasets = [
    dict(
        abbr=f"ds1000_{lib}",
        type=DS1000Dataset,
        path="./data/ds1000_data/",
        libs=f"{lib}",
        mode="Completion",
        reader_cfg=ds1000_reader_cfg,
        infer_cfg=ds1000_infer_cfg,
        eval_cfg=ds1000_eval_cfg,
    ) for lib in [
        'Pandas',
        'Numpy',
        'Tensorflow',
        'Scipy',
        'Sklearn',
        'Pytorch',
    ]
]
ds1000_datasets.append(
    dict(
        abbr="ds1000_Matplotlib",
        type=DS1000Dataset,
        path="./data/ds1000_data/",
        libs="Matplotlib",
        mode="Completion",
        reader_cfg=ds1000_reader_cfg,
        infer_cfg=ds1000_infer_cfg,
        eval_cfg=dict(
            evaluator=dict(type=DS1000Evaluator),
            pred_role="BOT",
            pred_postprocessor=dict(type=ds1000_matplotlib_postprocess),
        ),
    ))
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import DS1000Dataset, DS1000ServiceEvaluator

ds1000_reader_cfg = dict(
    input_columns=["prompt"],
    output_column="test_column",
    train_split='test',
    test_split='test')

ds1000_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt="{prompt}",
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
ds1000_eval_cfg_dict = {
    lib: dict(
        evaluator=dict(
            type=DS1000ServiceEvaluator,
            lib=lib,
            ip_address="localhost",  # replace with your code_eval_server ip address
            port=5000,  # replace with your code_eval_server port
        ),
        pred_role="BOT")
    for lib in [
        'Pandas',
        'Numpy',
        'Tensorflow',
        'Scipy',
        'Sklearn',
        'Pytorch',
        'Matplotlib',
    ]
}
# The DS-1000 dataset can be downloaded from
# https://github.com/HKUNLP/DS-1000/blob/main/ds1000_data.zip
ds1000_datasets = [
    dict(
        abbr=f"ds1000_{lib}",
        type=DS1000Dataset,
        path="./data/ds1000_data/",
        libs=f"{lib}",
        mode="Completion",
        reader_cfg=ds1000_reader_cfg,
        infer_cfg=ds1000_infer_cfg,
        eval_cfg=ds1000_eval_cfg_dict[lib],
    ) for lib in [
        'Pandas',
        'Numpy',
        'Tensorflow',
        'Scipy',
        'Sklearn',
        'Pytorch',
        'Matplotlib',
    ]
]
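The service-based config above points each evaluator at a code_eval_server (ip_address/port). Below is a minimal sketch of pulling these datasets into a top-level run config via the same read_base pattern used elsewhere in this commit; the import paths and model config are hypothetical placeholders, not files from this commit:

# Minimal sketch of a top-level eval config (hypothetical module paths);
# adjust the imports to wherever the DS-1000 dataset config above and your
# model config actually live.
from mmengine.config import read_base

with read_base():
    from .datasets.ds1000.ds1000_service_eval_gen import ds1000_datasets  # hypothetical path
    from .models.hf_internlm.hf_internlm_chat_7b import models  # hypothetical model config

datasets = [*ds1000_datasets]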
@@ -45,7 +45,7 @@ gsm8k_eval_cfg = dict(
 gsm8k_datasets = [
     dict(
-        abbr='gsm8k',
+        abbr='gsm8k-agent',
         type=GSM8KDataset,
         path='./data/gsm8k',
         reader_cfg=gsm8k_reader_cfg,
......
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GSM8KDataset, gsm8k_postprocess, gsm8k_dataset_postprocess, Gsm8kEvaluator

gsm8k_reader_cfg = dict(input_columns=['question'], output_column='answer')

gsm8k_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
dict(role='HUMAN', prompt="Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\nAnswer:"),
dict(role='BOT', prompt="Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n"),
dict(role='HUMAN', prompt="Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\nAnswer:"),
dict(role='BOT', prompt="Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n"),
dict(role='HUMAN', prompt="Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\nAnswer:"),
dict(role='BOT', prompt="When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n"),
dict(role='HUMAN', prompt="Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\nAnswer:"),
dict(role='BOT', prompt="For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n"),
dict(role='HUMAN', prompt="Question: {question}\nLet's think step by step\nAnswer:"),
],
)),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512, stopping_criteria=[":", "Question:", "Question"]))
gsm8k_eval_cfg = dict(evaluator=dict(type=Gsm8kEvaluator),
pred_postprocessor=dict(type=gsm8k_postprocess),
dataset_postprocessor=dict(type=gsm8k_dataset_postprocess))
gsm8k_datasets = [
dict(
abbr='gsm8k',
type=GSM8KDataset,
path='./data/gsm8k',
reader_cfg=gsm8k_reader_cfg,
infer_cfg=gsm8k_infer_cfg,
eval_cfg=gsm8k_eval_cfg)
]
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLOnlyInferencer
from opencompass.openicl.icl_evaluator import AveragePPLEvaluator
from opencompass.datasets import GSM8KDataset, GSM8KReferenceSkywork

gsm8k_datasets = []

gsm8k_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{question} {answer}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

gsm8k_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

for split in ['train', 'test']:
    gsm8k_reader_cfg = dict(
        input_columns=['question', 'answer'],
        output_column=None,
        train_split=split,
        test_split=split,
    )
    gsm8k_datasets.append(
        dict(
            abbr=f'gsm8k-{split}-ppl',
            type=GSM8KDataset,
            path='./data/gsm8k',
            reader_cfg=gsm8k_reader_cfg,
            infer_cfg=gsm8k_infer_cfg,
            eval_cfg=gsm8k_eval_cfg)
    )

gsm8k_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{text}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

gsm8k_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

gsm8k_reader_cfg = dict(
    input_columns=['text'],
    output_column=None,
)

gsm8k_datasets.append(
    dict(
        abbr='gsm8k-ref-ppl',
        type=GSM8KReferenceSkywork,
        path='./data/gsm8k-extra/mock_gsm8k_test.jsonl',
        reader_cfg=gsm8k_reader_cfg,
        infer_cfg=gsm8k_infer_cfg,
        eval_cfg=gsm8k_eval_cfg
    )
)
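The reference split above reads a single `text` column from ./data/gsm8k-extra/mock_gsm8k_test.jsonl. That file is not part of this commit, so the following is only a sketch of the per-line record shape implied by the reader_cfg; the field contents are assumptions:

# Hypothetical example of one JSONL line consumed by the 'gsm8k-ref-ppl'
# dataset above: a single 'text' field, as declared in input_columns.
import json

record = {"text": "Question: ...  Answer: ..."}  # placeholder content
print(json.dumps(record))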
@@ -79,7 +79,7 @@ math_eval_cfg = dict(
 math_datasets = [
     dict(
-        abbr='math',
+        abbr='math-agent',
         type=MATHDataset,
         path='./data/math/math.json',
         reader_cfg=math_reader_cfg,
......
 from mmengine.config import read_base
 with read_base():
-    from .winogrande_ppl_55a66e import winogrande_datasets  # noqa: F401, F403
+    from .winogrande_ppl_8be6c3 import winogrande_datasets  # noqa: F401, F403
@@ -4,6 +4,10 @@ from opencompass.openicl.icl_inferencer import PPLInferencer
 from opencompass.openicl.icl_evaluator import AccEvaluator
 from opencompass.datasets import winograndeDataset

+# WARNING: This config cannot reproduce results in the paper.
+# e.g. LLAMA2-7B Winogrande 69.2 (paper) -> 62.27 (this config)
+# Please try winogrande_ppl_8be6c3
+
 winogrande_reader_cfg = dict(
     input_columns=['opt1', 'opt2'],
     output_column='answer',
......
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import LoglikelihoodInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import winograndeDataset

winogrande_reader_cfg = dict(
    input_columns=['opt1', 'opt2'],
    output_column='answer',
)

winogrande_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            1: "{opt1}",
            2: "{opt2}",
        }
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=LoglikelihoodInferencer))

winogrande_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

winogrande_datasets = [
    dict(
        abbr='winogrande',
        type=winograndeDataset,
        path='./data/winogrande',
        reader_cfg=winogrande_reader_cfg,
        infer_cfg=winogrande_infer_cfg,
        eval_cfg=winogrande_eval_cfg)
]
@@ -4,6 +4,10 @@ from opencompass.openicl.icl_inferencer import PPLInferencer
 from opencompass.openicl.icl_evaluator import AccEvaluator
 from opencompass.datasets import winograndeDataset

+# WARNING: This config cannot reproduce results in the paper.
+# e.g. LLAMA2-7B Winogrande 69.2 (paper) -> 62.27 (this config)
+# Please try winogrande_ppl_8be6c3
+
 winogrande_reader_cfg = dict(
     input_columns=['opt1', 'opt2'],
     output_column='answer',
......
@@ -4,11 +4,20 @@ from opencompass.partitioners import SizePartitioner
 from opencompass.runners import LocalRunner
 from opencompass.tasks import OpenICLInferTask
 from opencompass.models.lagent import LagentAgent
-from lagent import PythonInterpreter, ReAct
+from opencompass.lagent.actions.python_interpreter import PythonInterpreter
+from lagent import ReAct
 from lagent.agents.react import ReActProtocol

 with read_base():
-    from .datasets.gsm8k.gsm8k_agent_gen_3ac57d import gsm8k_datasets as datasets
+    from .datasets.gsm8k.gsm8k_agent_gen_3ac57d import gsm8k_datasets
+    from .datasets.math.math_agent_gen_861b4f import math_datasets
+    from .datasets.MathBench.mathbench_agent_gen_568903 import mathbench_agent_datasets
+    from .summarizers.math_agent import summarizer
+
+datasets = []
+datasets += gsm8k_datasets
+datasets += math_datasets
+datasets += mathbench_agent_datasets

 system_prompt = """You are a helpful assistant which use tools to solve mathematical reasoning questions. The code must be a function, and the function name must be 'solution'. For mathematics, please use code tool to calculate. The example format is as follows:
 ```
......
@@ -10,7 +10,7 @@ from opencompass.runners import LocalRunner
 from opencompass.tasks import OpenICLInferTask

 with read_base():
-    from .datasets.CIBench.CIBench_gen_eb42f9 import \
+    from .datasets.CIBench.CIBench_gen_8ab0dc import \
         cibench_datasets as datasets

 FORCE_STOP_PROMPT_EN = """You should directly give results based on history information."""

@@ -36,7 +36,21 @@ Also please follow the guidelines:
 3. The generated codes will be executed in an ipython manner and the results will be cached.
 4. Your responded code should always be simple and only solves the problem in current step.

-Begin!
+For example:
+File url: `xxxx`
+### Step 1. Load the dataset from the url into a pandas DataFrame named `df`.
+
+{thought} We should use `pandas` to solve this step.
+{action} IPythonInterpreter
+{action_input} ```python
+import pandas as pd
+url = "xxxx"
+data = pd.read_csv(url)
+```
+{response} The code is succeed without any outputs.
+
+Let us begin from here!
 """

 IPYTHON_INTERPRETER_DESCRIPTION = '''\

@@ -69,9 +83,6 @@ models = [
     ),
 ]

-for dataset in datasets:
-    # Evaluate on every assistant response
-    dataset['infer_cfg']['inferencer']['infer_mode'] = 'every'

 infer = dict(
     partitioner=dict(type=SizePartitioner, max_task_size=1000),
......
from mmengine.config import read_base
from opencompass.models.openai_api import OpenAI
from opencompass.partitioners import SizePartitioner
from opencompass.runners import LocalRunner
from opencompass.tasks import OpenICLInferTask
from opencompass.models.lagent import LagentAgent
from lagent import PythonInterpreter, ReAct
from lagent.agents.react import ReActProtocol

system_prompt = """You are a helpful assistant which use tools to solve mathematical reasoning questions. The code must be a function, and the function name must be 'solution'. For mathematics, please use code tool to calculate. The example format is as follows:
```
def solution():
    variable_names_with_real_meaning = func(variable)
    return variable_names_with_real_meaning
```"""

protocol = dict(
    type=ReActProtocol,
    action=dict(role="ACTION", begin="Tool:", end="\n"),
    action_input=dict(role="ARGS", begin="Tool Input:", end="\n"),
    finish=dict(role="FINISH", begin="FinalAnswer:", end="\n"),
    call_protocol=system_prompt,
)

with read_base():
    from .datasets.MathBench.mathbench_code_gen_568903 import mathbench_code_datasets as datasets
    from .summarizers.mathbench import summarizer

models = [
    dict(
        abbr='gpt-3.5-react',
        type=LagentAgent,
        agent_type=ReAct,
        max_turn=3,
        llm=dict(
            type=OpenAI,
            path='gpt-3.5-turbo',
            key='ENV',
            query_per_second=1,
            max_seq_len=4096,
        ),
        actions=[
            dict(type=PythonInterpreter),
        ],
        protocol=protocol,
        batch_size=1,
    ),
]

infer = dict(
    partitioner=dict(type=SizePartitioner, max_task_size=1000),
    runner=dict(
        type=LocalRunner,
        max_num_workers=16,
        task=dict(type=OpenICLInferTask)),
)
from mmengine.config import read_base

with read_base():
    from .models.qwen.hf_qwen_7b import models as hf_qwen_7b_base_models
    from .models.qwen.hf_qwen_7b_chat import models as hf_qwen_7b_chat_models
    from .datasets.ceval.ceval_ppl_578f8d import ceval_datasets as base_ceval_datasets
    from .datasets.ceval.ceval_gen_5f30c7 import ceval_datasets as chat_ceval_datasets
    from .internal.clusters.slurm import infer, eval
    # from .clusters.slurm import infer_split as infer, eval
    # from .clusters.slurm import infer_size as infer, eval
    # from .clusters.slurm import infer_size_split as infer, eval

base_ceval_datasets = base_ceval_datasets[:1]
chat_ceval_datasets = chat_ceval_datasets[-1:]
# If you do not want to run all the combinations of models and datasets, you
# can specify the combinations you want to run here. This is useful when you
# deliberately want to skip some subset of the combinations.
# Models and datasets in different combinations are recommended to be disjoint
# (different `abbr` in model & dataset configs), as we haven't tested this case
# thoroughly.
model_dataset_combinations = [
    dict(models=hf_qwen_7b_base_models, datasets=base_ceval_datasets),
    dict(models=hf_qwen_7b_chat_models, datasets=chat_ceval_datasets),
    # dict(models=[model_cfg1, ...], datasets=[dataset_cfg1, ...]),
]

# The union of the models and datasets in model_dataset_combinations should be
# stored in the `models` and `datasets` variables below. Otherwise, modules
# like the summarizer will miss some information.
models = [*hf_qwen_7b_base_models, *hf_qwen_7b_chat_models]
datasets = [*base_ceval_datasets, *chat_ceval_datasets]

work_dir = './outputs/default/mdcomb/'
"""
dataset                version   metric   mode   qwen-7b-hf   qwen-7b-chat-hf
---------------------- --------- -------- ------ ------------ -----------------
ceval-computer_network 9b9417    accuracy ppl    52.63        -
ceval-physician        6e277d    accuracy gen    -            59.18
"""