Commit c289ecc0 authored by xinghao: Initial commit
from mmengine.config import read_base
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
with read_base():
    from .OlympiadBench_categories import math_categories as categories

# Create prompter instance for problems
olympiadbench_prompter_cfg = dict(
    type='OlympiadBenchPrompter'
)

olympiadbench_reader_cfg = dict(
    input_columns=[
        'problem', 'language', 'subject', 'question_type',
        'answer_type', 'is_multiple_answer', 'unit', 'questions'
    ],
    output_column='solution'
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
olympiadbenchMath_datasets = []
for _name in categories:
    olympiadbench_infer_cfg = dict(
        prompt_template=dict(
            type='OlympiadBenchTemplate'
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    # Evaluation configuration
    olympiadbench_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=GRADER_TEMPLATE
                        ),
                    ]),
            ),
            dataset_cfg=dict(
                type=OlympiadBenchDataset,
                path='opencompass/OlympiadBench',
                name=_name,
                reader_cfg=olympiadbench_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    olympiadbenchMath_datasets.append(
        dict(
            type=OlympiadBenchDataset,
            abbr=f'OlympiadBench_{_name}',
            path='opencompass/OlympiadBench',
            name=_name,
            reader_cfg=olympiadbench_reader_cfg,
            infer_cfg=olympiadbench_infer_cfg,
            eval_cfg=olympiadbench_eval_cfg,
        )
    )
del _name
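# NOTE (illustrative sketch, not part of the original config): `judge_cfg=dict()` above is left
# empty, so a judge model has to be supplied before evaluation. One common pattern, assuming an
# OpenAI-compatible endpoint via opencompass.models.OpenAISDK (the model name, key handling and
# URL below are placeholders), is to patch it in from a top-level config:
#
#     from opencompass.models import OpenAISDK
#
#     judge_cfg = dict(
#         type=OpenAISDK,
#         abbr='judge-model',
#         path='gpt-4o-mini',                           # placeholder judge model name
#         key='ENV',                                    # read the API key from the environment
#         openai_api_base='https://api.openai.com/v1',  # placeholder endpoint
#         query_per_second=2,
#         max_out_len=2048,
#         batch_size=8,
#     )
#     for _d in olympiadbenchMath_datasets:
#         _d['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg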
from mmengine.config import read_base
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.evaluator import (
    GenericLLMEvaluator,
    CascadeEvaluator,
    MATHVerifyEvaluator,
)
from opencompass.datasets import generic_llmjudge_postprocess
with read_base():
    from .OlympiadBench_categories import categories

# Create prompter instance for problems
olympiadbench_prompter_cfg = dict(
    type='OlympiadBenchPrompter'
)

olympiadbench_reader_cfg = dict(
    input_columns=[
        'problem', 'language', 'subject', 'question_type',
        'answer_type', 'is_multiple_answer', 'unit', 'questions'
    ],
    output_column='solution'
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
olympiadbench_datasets = []
for _name in categories:
    olympiadbench_infer_cfg = dict(
        prompt_template=dict(
            type='OlympiadBenchTemplate'
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    # Evaluation configuration
    olympiadbench_eval_cfg = dict(
        evaluator=dict(
            type=CascadeEvaluator,
            rule_evaluator=dict(
                type=MATHVerifyEvaluator,
            ),
            llm_evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                        ],
                        round=[
                            dict(
                                role='HUMAN',
                                prompt=GRADER_TEMPLATE
                            ),
                        ]),
                ),
                dataset_cfg=dict(
                    type=OlympiadBenchDataset,
                    path='opencompass/OlympiadBench',
                    name=_name,
                    reader_cfg=olympiadbench_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            parallel=False,
        ),
    )

    olympiadbench_datasets.append(
        dict(
            type=OlympiadBenchDataset,
            abbr=f'OlympiadBench_{_name}',
            path='opencompass/OlympiadBench',
            name=_name,
            reader_cfg=olympiadbench_reader_cfg,
            infer_cfg=olympiadbench_infer_cfg,
            eval_cfg=olympiadbench_eval_cfg,
            n=1,
        )
    )
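# NOTE (illustrative sketch, not part of the original config): CascadeEvaluator chains the
# rule-based MATHVerifyEvaluator with the LLM judge defined above. With parallel=False, as set
# here, the rule check is expected to run first and only samples it rejects are escalated to the
# LLM judge; parallel=True would score every sample with both evaluators (behaviour assumed from
# the CascadeEvaluator interface; verify against the installed version). Switching modes would
# just flip that flag after the configs are built, e.g.:
#
#     for _d in olympiadbench_datasets:
#         _d['eval_cfg']['evaluator']['parallel'] = True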
from mmengine.config import read_base
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2
with read_base():
    from .OlympiadBench_categories import categories

# Create prompter instance for problems
olympiadbench_prompter_cfg = dict(
    type='OlympiadBenchPrompter'
)

olympiadbench_reader_cfg = dict(
    input_columns=[
        'problem', 'language', 'subject', 'question_type',
        'answer_type', 'is_multiple_answer', 'unit', 'questions'
    ],
    output_column='solution'
)
olympiadbench_datasets = []
for _name in categories:
    olympiadbench_infer_cfg = dict(
        prompt_template=dict(
            type='OlympiadBenchTemplate'
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    olympiadbench_eval_cfg = dict(
        evaluator=dict(type=OlympiadBenchEvaluator, version='v2'),
        pred_postprocessor=dict(type=olympiadbench_postprocess_v2),
    )

    olympiadbench_datasets.append(
        dict(
            type=OlympiadBenchDataset,
            abbr=f'OlympiadBench_{_name}',
            path='opencompass/OlympiadBench',
            name=_name,
            reader_cfg=olympiadbench_reader_cfg,
            infer_cfg=olympiadbench_infer_cfg,
            eval_cfg=olympiadbench_eval_cfg,
        )
    )
del _name
from mmengine.config import read_base
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
with read_base():
    from .OlympiadBench_categories import categories

# Create prompter instance for problems
olympiadbench_prompter_cfg = dict(
    type='OlympiadBenchPrompter'
)

olympiadbench_reader_cfg = dict(
    input_columns=[
        'problem', 'language', 'subject', 'question_type',
        'answer_type', 'is_multiple_answer', 'unit', 'questions'
    ],
    output_column='solution'
)
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
olympiadbench_datasets = []
for _name in categories:
    olympiadbench_infer_cfg = dict(
        prompt_template=dict(
            type='OlympiadBenchTemplate'
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    # olympiadbench_eval_cfg = dict(
    #     evaluator=dict(type=OlympiadBenchEvaluator, version='v2'),
    #     pred_postprocessor=dict(type=olympiadbench_postprocess_v2),
    # )

    # Evaluation configuration
    olympiadbench_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=GRADER_TEMPLATE
                        ),
                    ]),
            ),
            dataset_cfg=dict(
                type=OlympiadBenchDataset,
                path='opencompass/OlympiadBench',
                name=_name,
                reader_cfg=olympiadbench_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    olympiadbench_datasets.append(
        dict(
            type=OlympiadBenchDataset,
            abbr=f'OlympiadBench_{_name}',
            path='opencompass/OlympiadBench',
            name=_name,
            reader_cfg=olympiadbench_reader_cfg,
            infer_cfg=olympiadbench_infer_cfg,
            eval_cfg=olympiadbench_eval_cfg,
        )
    )
del _name
categories = [
    'OE_TO_maths_en_COMP',    # OpenEnded - TextOnly - maths - COMP
    'OE_TO_maths_zh_COMP',    # OpenEnded - TextOnly - maths - COMP
    'OE_TO_maths_zh_CEE',     # OpenEnded - TextOnly - maths - CEE
    'OE_TO_physics_en_COMP',  # OpenEnded - TextOnly - physics - COMP
    'OE_TO_physics_zh_CEE',   # OpenEnded - TextOnly - physics - CEE
]

math_categories = [
    'OE_TO_maths_en_COMP',    # OpenEnded - TextOnly - maths - COMP
    'OE_TO_maths_zh_COMP',    # OpenEnded - TextOnly - maths - COMP
    'OE_TO_maths_zh_CEE',     # OpenEnded - TextOnly - maths - CEE
]

physics_categories = [
    'OE_TO_physics_en_COMP',  # OpenEnded - TextOnly - physics - COMP
    'OE_TO_physics_zh_CEE',   # OpenEnded - TextOnly - physics - CEE
]
from mmengine.config import read_base
with read_base():
    from .OpenFinData_gen_46dedb import OpenFinData_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets.OpenFinData import OpenFinDataDataset, OpenFinDataKWEvaluator
from opencompass.utils.text_postprocessors import last_capital_postprocess
OpenFinData_datasets = []
OpenFinData_3choices_list = ['emotion_identification', 'entity_disambiguation', 'financial_facts']
OpenFinData_4choices_list = ['data_inspection', 'financial_terminology', 'metric_calculation', 'value_extraction']
OpenFinData_5choices_list = ['intent_understanding']
OpenFinData_keyword_list = ['entity_recognition']
OpenFinData_all_list = OpenFinData_3choices_list + OpenFinData_4choices_list + OpenFinData_5choices_list + OpenFinData_keyword_list
OpenFinData_eval_cfg = dict(evaluator=dict(type=AccEvaluator), pred_postprocessor=dict(type=last_capital_postprocess))
OpenFinData_KW_eval_cfg = dict(evaluator=dict(type=OpenFinDataKWEvaluator))
for _name in OpenFinData_all_list:
    if _name in OpenFinData_3choices_list:
        OpenFinData_infer_cfg = dict(
            ice_template=dict(type=PromptTemplate, template=dict(begin='</E>', round=[
                dict(role='HUMAN', prompt=f'{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\n答案: '),
                dict(role='BOT', prompt='{answer}')]),
                ice_token='</E>'), retriever=dict(type=ZeroRetriever), inferencer=dict(type=GenInferencer))
        OpenFinData_datasets.append(
            dict(
                type=OpenFinDataDataset,
                path='./data/openfindata_release',
                name=_name,
                abbr='OpenFinData-' + _name,
                reader_cfg=dict(
                    input_columns=['question', 'A', 'B', 'C'],
                    output_column='answer'),
                infer_cfg=OpenFinData_infer_cfg,
                eval_cfg=OpenFinData_eval_cfg,
            ))

    if _name in OpenFinData_4choices_list:
        OpenFinData_infer_cfg = dict(
            ice_template=dict(type=PromptTemplate, template=dict(begin='</E>', round=[
                dict(role='HUMAN', prompt=f'{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案: '),
                dict(role='BOT', prompt='{answer}')]),
                ice_token='</E>'), retriever=dict(type=ZeroRetriever), inferencer=dict(type=GenInferencer))
        OpenFinData_datasets.append(
            dict(
                type=OpenFinDataDataset,
                path='./data/openfindata_release',
                name=_name,
                abbr='OpenFinData-' + _name,
                reader_cfg=dict(
                    input_columns=['question', 'A', 'B', 'C', 'D'],
                    output_column='answer'),
                infer_cfg=OpenFinData_infer_cfg,
                eval_cfg=OpenFinData_eval_cfg,
            ))

    if _name in OpenFinData_5choices_list:
        OpenFinData_infer_cfg = dict(
            ice_template=dict(type=PromptTemplate, template=dict(begin='</E>', round=[
                dict(role='HUMAN', prompt=f'{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n答案: '),
                dict(role='BOT', prompt='{answer}')]),
                ice_token='</E>'), retriever=dict(type=ZeroRetriever), inferencer=dict(type=GenInferencer))
        OpenFinData_datasets.append(
            dict(
                type=OpenFinDataDataset,
                path='./data/openfindata_release',
                name=_name,
                abbr='OpenFinData-' + _name,
                reader_cfg=dict(
                    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
                    output_column='answer'),
                infer_cfg=OpenFinData_infer_cfg,
                eval_cfg=OpenFinData_eval_cfg,
            ))

    if _name in OpenFinData_keyword_list:
        OpenFinData_infer_cfg = dict(
            ice_template=dict(type=PromptTemplate, template=dict(begin='</E>', round=[
                dict(role='HUMAN', prompt=f'{{question}}\n答案: '),
                dict(role='BOT', prompt='{answer}')]),
                ice_token='</E>'), retriever=dict(type=ZeroRetriever), inferencer=dict(type=GenInferencer))
        OpenFinData_datasets.append(
            dict(
                type=OpenFinDataDataset,
                path='./data/openfindata_release',
                name=_name,
                abbr='OpenFinData-' + _name,
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer'),
                infer_cfg=OpenFinData_infer_cfg,
                eval_cfg=OpenFinData_KW_eval_cfg,
            ))
del _name
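# NOTE (illustrative sketch, not part of the original config): for the multiple-choice subsets,
# the prediction is reduced by last_capital_postprocess to the last capital letter found in the
# completion, which AccEvaluator then compares against the gold choice. Roughly (behaviour
# assumed from the helper's name; check opencompass.utils.text_postprocessors in your install):
#
#     from opencompass.utils.text_postprocessors import last_capital_postprocess
#     last_capital_postprocess('答案是 C,因为流通市值大于总市值。')  # expected to yield 'C'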
# OpenFinData
## Introduction
The following introduction is adapted from the [OpenFinData](https://github.com/open-compass/OpenFinData) repository:
```
OpenFinData is an open-source financial evaluation dataset jointly released by EastMoney (东方财富) and the Shanghai Artificial Intelligence Laboratory. Built on the diverse, real-world scenarios of EastMoney's financial business, it represents realistic industry needs and is currently the most comprehensive and professionally focused financial evaluation dataset of its kind, aiming to provide a high-quality data resource for researchers and developers in financial technology.
```
## Official link
### Repository
[OpenFinData](https://github.com/open-compass/OpenFinData)
## Use cases
In evaluation scripts, add the OpenFinData datasets like any other dataset:
```
from .datasets.OpenFinData.OpenFinData_gen import OpenFinData_datasets
```
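A minimal top-level config that wires these datasets to a model follows the same pattern; the sketch below is only illustrative, and the model import should be replaced with whichever model config exists under `configs/models` in your setup:
```python
from mmengine.config import read_base

with read_base():
    # OpenFinData dataset configs
    from .datasets.OpenFinData.OpenFinData_gen import OpenFinData_datasets
    # example model config; substitute any model config available under configs/models
    from .models.hf_internlm.hf_internlm2_chat_7b import models as internlm2_chat_7b_models

datasets = OpenFinData_datasets
models = internlm2_chat_7b_models
```
The config can then be launched from the OpenCompass root with `python run.py <path/to/this/config>.py`.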
## Examples
Input example I:
```
你是一个数据审核小助手。表格内给出了2023年11月10日文一科技(600520)的最新数据,请指出其中哪个数据有误。请给出正确选项。
| 代码 | 名称 | 最新 | 涨幅% | 涨跌 | 成交量(股) | 成交额(元) | 流通市值 | 总市值 | 所属行业 |
|-------:|:-----|------:|------:|-----:|---------:|-----------:|-----------:|-----------:|:-------|
| 600520 | 文一科技 | 34.01 | 9.99 | 3.09 | 74227945 | 2472820896 | 5388200000 | 5388204300 | 通用设备 |
A. 2023年11月10日文一科技最新价34.01
B. 2023年11月10日文一科技成交额为2472820896
C. 文一科技的流通市值和总市值可能有误,因为流通市值5388200000元大于总市值5388204300元
D. 无明显错误数据
答案:
```
Output example I (from QWen-14B-Chat):
```
C. 文一科技的流通市值和总市值可能有误,因为流通市值5388200000元大于总市值5388204300元。
```
Input example II:
```
你是一个实体识别助手。请列出以下内容中提及的公司。
一度扬帆顺风的光伏产业,在过去几年中,面对潜在的高利润诱惑,吸引了众多非光伏行业的上市公司跨界转战,试图分得一杯羹。然而,今年下半年以来,出现了一个显著的趋势:一些跨界公司开始放弃或削减其光伏项目,包括皇氏集团(002329.SZ)、乐通股份(002319.SZ)、奥维通信(002231.SZ)等近十家公司。此外,还有一些光伏龙头放缓投资计划,如大全能源(688303.SH)、通威股份(600438.SZ)。业内人士表示,诸多因素导致了这股热潮的退却,包括市场变化、技术门槛、政策调整等等。光伏产业经历了从快速扩张到现在的理性回调,行业的自我调整和生态平衡正在逐步展现。从财务状况来看,较多选择退出的跨界企业都面临着经营压力。不过,皇氏集团、乐通股份等公司并未“全身而退”,仍在保持对光伏市场的关注,寻求进一步开拓的可能性。
答案:
```
Output example II (from InternLM2-7B-Chat):
```
皇氏集团(002329.SZ)、乐通股份(002319.SZ)、奥维通信(002231.SZ)、大全能源(688303.SH)、通威股份(600438.SZ)
```
## Evaluation results
```
dataset version metric mode qwen-14b-chat-hf internlm2-chat-7b-hf
---------------------------------- --------- -------- ------ ------------------ ----------------------
OpenFinData-emotion_identification b64193 accuracy gen 85.33 78.67
OpenFinData-entity_disambiguation b64193 accuracy gen 52 68
OpenFinData-financial_facts b64193 accuracy gen 70.67 46.67
OpenFinData-data_inspection a846b7 accuracy gen 53.33 51.67
OpenFinData-financial_terminology a846b7 accuracy gen 84 73.33
OpenFinData-metric_calculation a846b7 accuracy gen 55.71 68.57
OpenFinData-value_extraction a846b7 accuracy gen 84.29 71.43
OpenFinData-intent_understanding f0bd9e accuracy gen 88 86.67
OpenFinData-entity_recognition 81aeeb accuracy gen 68 84
```
from opencompass.datasets import PhyBenchDataset, MathEEDEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
phybench_reader_cfg = dict(
    input_columns=['input'],
    output_column='target',
)
phybench_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='Solve the following physics problem and return only the final result as a clean LaTeX expression. Remember to put your final answer within \\boxed{}.\n\nQuestion: {input}\nAnswer: ',
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
phybench_eval_cfg = dict(
    evaluator=dict(type=MathEEDEvaluator)
)

phybench_datasets = [
    dict(
        abbr='phybench-eed',
        type=PhyBenchDataset,
        path='opencompass/PHYBench',
        reader_cfg=phybench_reader_cfg,
        infer_cfg=phybench_infer_cfg,
        eval_cfg=phybench_eval_cfg,
    )
]
from mmengine.config import read_base
with read_base():
    from .PHYSICS_llm_judge_gen_a133a2 import physics_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    PHYSICSDataset,
    generic_llmjudge_postprocess,
)
from opencompass.evaluator import GenericLLMEvaluator
physics_sets = [
    'atomic_dataset_textonly',
    'electro_dataset_textonly',
    'mechanics_dataset_textonly',
    'optics_dataset_textonly',
    'quantum_dataset_textonly',
    'statistics_dataset_textonly',
]
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of <Answer 1:> or <Answer 2:>, etc. Please note that only when all sub predictions given in prediction correspond one-to-one with the answer and are all correct, will the prediction be considered correct; otherwise, it will be considered incorrect.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: All Sub Predictions Are Correct
B: Not Every Sub Predictions is Correct
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# GRADER_TEMPLATE = """
# Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
#
# Here are some evaluation criteria:
# 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
# 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
# 3. Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of <Answer 1:> or <Answer 2:>, etc. Please note that as long as at least one correct answer appears in the prediction, the prediction is considered correct.
# 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
# 5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible.
#
# Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
# A: At Least One Sub Prediction is Correct
# B: All Sub Predictions are Incorrect
# Just return the letters "A" or "B", with no text around it.
#
# Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
#
# <Original Question Begin>: \n{input}\n<Original Question End>\n\n
# <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
# <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
#
# Judging the correctness of candidates' answers:
# """.strip()
physics_reader_cfg = dict(input_columns=['input'], output_column='target')
physics_datasets = []
for _name in physics_sets:
    physics_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt='Answer the given question step by step. Begin by explaining your reasoning process clearly. Conclude by providing the final answers at the end in LaTeX boxed format. Think step by step before answering. It should be noted that the question may include multiple sub questions, please ensure that each question is answered in order.\n\nQ: {input}\nA: ',
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    physics_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=PHYSICSDataset,
                path='opencompass/PHYSICS-textonly',
                abbr='PHYSICS_' + _name,
                name=_name,
                reader_cfg=physics_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    physics_datasets.append(
        dict(
            abbr='PHYSICS_' + _name,
            type=PHYSICSDataset,
            path='opencompass/PHYSICS-textonly',
            name=_name,
            reader_cfg=physics_reader_cfg,
            infer_cfg=physics_infer_cfg,
            eval_cfg=physics_eval_cfg,
        )
    )
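# NOTE (illustrative, not part of the original config): this file keeps two grading rubrics. The
# active GRADER_TEMPLATE only accepts a prediction when every sub-answer is correct; the
# commented-out template above it is a lenient variant that accepts a prediction if at least one
# sub-answer matches. To switch rubrics, re-bind GRADER_TEMPLATE to the lenient text before the
# `for _name in physics_sets:` loop runs, since the judge prompt is only read inside that loop
# when physics_eval_cfg is built.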
from mmengine.config import read_base
with read_base():
    from .PJExam_gen_8cd97c import PJExam_datasets  # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import PJExamDataset, PJExamEvaluator
PJExam_datasets = []
for _name in [
        'gk-2022-v1', 'gk-2022-v1-math', 'gk-2023-v1', 'gk-2023-v1-math',
        'gk-2023-v2', 'gk-2023-v2-math', 'zk-2022-v1'
]:
    _hint = '请你做一道</major>选择题\n请你一步一步思考并将思考过程写在【解析】和<eoe>之间。你将从A,B,C,D中选出正确的答案,并写在【答案】和<eoa>之间。\n例如:【答案】A<eoa>\n完整的题目回答的格式如下:\n【解析】...<eoe>\n【答案】...<eoa>\n请你严格按照上述格式作答。\n题目如下:\n'
    _reader_cfg = {
        'input_columns': ['question'],
        'output_column': 'std_ans',
    }
    _infer_cfg = {
        'ice_template': {
            'type': PromptTemplate,
            'template': {
                'round': [{
                    'role': 'HUMAN',
                    'prompt': _hint + '{question}',
                }]
            },
            'ice_token': '</E>'
        },
        'retriever': {
            'type': ZeroRetriever
        },
        'inferencer': {
            'type': GenInferencer,
            'max_out_len': 1024,
        }
    }
    _eval_cfg = {
        'evaluator': {
            'type': PJExamEvaluator
        },
        'pred_role': 'BOT',
        'ds_column': 'eval_infos'
    }
    _dataset = {
        'type': PJExamDataset,
        'abbr': 'PJExamDataset-' + _name,
        'path': './data/PJExam',
        'name': _name,
        'reader_cfg': _reader_cfg,
        'infer_cfg': _infer_cfg,
        'eval_cfg': _eval_cfg,
    }
    PJExam_datasets.append(_dataset)
del _name, _hint, _reader_cfg, _infer_cfg, _eval_cfg, _dataset
from mmengine.config import read_base
with read_base():
    from .flores_gen_2697d7 import PMMEval_flores_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalFloresDataset, PMMEvalFloresEvaluator, pmmeval_flores_postprocess
NATURAL_LANGUAGE_FULLNAMES_FLORES = ['Chinese', 'Arabic', 'Spanish', 'French', 'Japanese', 'Korean', 'Portuguese', 'Thai', 'Vietnamese']
PROMPT = {
    'Chinese': '将这个句子从英语翻译成中文。\n\n{src}',
    'Arabic': 'ترجم هذه الجملة من الإنجليزية إلى العربية.\n\n{src}',
    'Spanish': 'Traduce esta oración del inglés al español.\n\n{src}',
    'Japanese': 'この文を英語から日本語に翻訳してください。\n\n{src}',
    'Korean': '이 문장을 영어에서 한국어로 번역하세요.\n\n{src}',
    'Thai': 'แปลประโยคนี้จากภาษาอังกฤษเป็นภาษาไทย.\n\n{src}',
    'French': "Traduisez cette phrase de l'anglais en français.\n\n{src}",
    'Portuguese': 'Traduza esta frase do inglês para o português.\n\n{src}',
    'Vietnamese': 'Dịch câu này từ tiếng Anh sang tiếng Việt.\n\n{src}'
}
PMMEval_flores_datasets = list()
# Add flores_200
PMMEval_flores_reader_cfg = dict(
    input_columns=['src'],
    output_column='tgt',
    test_split='test'
)
for lang_fullname in NATURAL_LANGUAGE_FULLNAMES_FLORES:
    PMMEval_flores_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=PROMPT[lang_fullname]
                    )
                ]
            )
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    PMMEval_flores_eval_cfg = dict(
        evaluator=dict(type=PMMEvalFloresEvaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=pmmeval_flores_postprocess, lang_fullname=lang_fullname)
    )

    PMMEval_flores_datasets.append(
        dict(
            abbr=f'flores-{lang_fullname}',
            type=PMMEvalFloresDataset,
            path='P-MMEval',
            lang_fullname=lang_fullname,
            reader_cfg=PMMEval_flores_reader_cfg,
            infer_cfg=PMMEval_flores_infer_cfg,
            eval_cfg=PMMEval_flores_eval_cfg)
    )
from mmengine.config import read_base
with read_base():
    from .humanevalxl_gen_4dfef4 import PMMEval_HumanEvalXL_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalHumanEvalXLDataset, PMMEvalHumanEvalXLEvaluator
NATURAL_LANGUAGE_FULLNAMES = ['English', 'Chinese', 'Arabic', 'Spanish', 'French', 'Japanese', 'Korean', 'Portuguese', 'Thai', 'Vietnamese']
PMMEval_HumanEvalXL_datasets = list()
PMMEval_HumanEvalXL_reader_cfg = dict(
    input_columns=['task_id', 'prompt', 'entry_point', 'test', 'language', 'description', 'natural_language'],
    output_column='declaration',
    test_split='test'
)
PMMEval_HumanEvalXL_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='{prompt}'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
for lang_fullname in NATURAL_LANGUAGE_FULLNAMES:
    for program_lang in ['python', 'java', 'javascript']:
        PMMEval_HumanEvalXL_eval_cfg = dict(
            evaluator=dict(
                type=PMMEvalHumanEvalXLEvaluator,
                language=program_lang,
                text_language=lang_fullname,
                ip_address='localhost',
                port=5001),
            pred_role='BOT')
        PMMEval_HumanEvalXL_datasets.append(
            dict(
                abbr=f'humanevalxl-{program_lang}-{lang_fullname}',
                type=PMMEvalHumanEvalXLDataset,
                path='P-MMEval',
                lang=lang_fullname,
                program_lang=program_lang,
                reader_cfg=PMMEval_HumanEvalXL_reader_cfg,
                infer_cfg=PMMEval_HumanEvalXL_infer_cfg,
                eval_cfg=PMMEval_HumanEvalXL_eval_cfg)
        )
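# NOTE (illustrative sketch, not part of the original config): PMMEvalHumanEvalXLEvaluator follows
# the HumanEval-X style of evaluation, where generated programs are sent to a separate
# code-execution service; the ip_address/port above assume that service listens on localhost:5001.
# If it runs elsewhere, the address can be patched after the configs are built (hostname below is
# hypothetical):
#
#     for _d in PMMEval_HumanEvalXL_datasets:
#         _d['eval_cfg']['evaluator']['ip_address'] = 'code-eval.internal'
#         _d['eval_cfg']['evaluator']['port'] = 5000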
from mmengine.config import read_base
with read_base():
    from .mgsm_gen_679720 import PMMEval_MGSM_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalMGSMDataset, PMMEvalMGSMEvaluator
NATURAL_LANGUAGE_CODES = ['en', 'zh', 'ar', 'es', 'fr', 'ja', 'ko', 'pt', 'th', 'vi']
LANG_TO_INSTRUCTIONS = {
    'en': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"The answer is \". Do not add anything other than the integer answer after \"The answer is \".\n\n{question}",
    'es': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"La respuesta es \". Do not add anything other than the integer answer after \"La respuesta es \".\n\n{question}",
    'fr': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"La réponse est \". Do not add anything other than the integer answer after \"La réponse est \".\n\n{question}",
    'zh': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"答案是 \". Do not add anything other than the integer answer after \"答案是 \".\n\n{question}",
    'ja': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"答えは \". Do not add anything other than the integer answer after \"答えは \".\n\n{question}",
    'th': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"คำตอบคือ \". Do not add anything other than the integer answer after \"คำตอบคือ \".\n\n{question}",
    'ko': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"답변은 \". Do not add anything other than the integer answer after \"답변은 \".\n\n{question}",
    'pt': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"A resposta é \". Do not add anything other than the integer answer after \"A resposta é \".\n\n{question}",
    'vi': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"Câu trả lời là \". Do not add anything other than the integer answer after \"Câu trả lời là \".\n\n{question}",
    'ar': "Solve this math problem. Give the reasoning steps before giving the final answer on the last line by itself in the format of \"الجواب هو \". Do not add anything other than the integer answer after \"الجواب هو \".\n\n{question}"
}
PMMEval_MGSM_datasets = list()
# Add MGSM
PMMEval_MGSM_reader_cfg = dict(
    input_columns=['question'],
    output_column='answer',
    test_split='test'
)
PMMEval_MGSM_eval_cfg = dict(
    evaluator=dict(type=PMMEvalMGSMEvaluator),
    pred_role='BOT')
for lang_code in NATURAL_LANGUAGE_CODES:
    PMMEval_MGSM_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=LANG_TO_INSTRUCTIONS[lang_code]
                    )
                ]
            )
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    PMMEval_MGSM_datasets.append(
        dict(
            abbr=f'mgsm-{lang_code}',
            type=PMMEvalMGSMDataset,
            path='P-MMEval',
            lang=lang_code,
            reader_cfg=PMMEval_MGSM_reader_cfg,
            infer_cfg=PMMEval_MGSM_infer_cfg,
            eval_cfg=PMMEval_MGSM_eval_cfg)
    )
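# NOTE (illustrative sketch, not part of the original config): each language code becomes its own
# dataset entry with abbr 'mgsm-<lang>'. To evaluate only a subset of languages, the finished list
# can simply be filtered, e.g.:
#
#     PMMEval_MGSM_datasets = [
#         d for d in PMMEval_MGSM_datasets if d['abbr'] in ('mgsm-en', 'mgsm-zh')
#     ]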
from mmengine.config import read_base
with read_base():
    from .mhellaswag_gen_1a6b73 import PMMEval_MHellaswag_datasets