from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalMHellaswagDataset, PMMEvalMHellaswagEvaluator, pmmeval_mhellaswag_postprocess
NATURAL_LANGUAGE_CODES = ['en', 'zh', 'ar', 'es', 'fr', 'ja', 'ko', 'pt', 'th', 'vi']
PMMEVAL_MHELLASWAG_TEMPLATE = "Input: {ctx}\nOptions: \nA. {option_1}\nB. {option_2}\nC. {option_3}\nD. {option_4}\nPick the correct ending for the sentence from A, B, C, and D, and return it in the following JSON format:\n{\"answer\": \"[choice]\"}\nwhere [choice] must be one of A, B, C or D."
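# {ctx} and the four {option_N} fields are filled from the reader columns below; the braces around the JSON example are literal text shown to the model, not template placeholders.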
PMMEval_MHellaswag_datasets = list()
PMMEval_MHellaswag_reader_cfg = dict(
input_columns=['ctx', 'option_1', 'option_2', 'option_3', 'option_4'],
output_column='label',
test_split='test'
)
PMMEval_MHellaswag_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=PMMEVAL_MHELLASWAG_TEMPLATE
)
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for lang_code in NATURAL_LANGUAGE_CODES:
PMMEval_MHellaswag_eval_cfg = dict(
evaluator=dict(type=PMMEvalMHellaswagEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=pmmeval_mhellaswag_postprocess, lang_code=lang_code)
)
PMMEval_MHellaswag_datasets.append(
dict(
abbr=f'mhellaswag-{lang_code}',
type=PMMEvalMHellaswagDataset,
path='P-MMEval',
lang=lang_code,
reader_cfg=PMMEval_MHellaswag_reader_cfg,
infer_cfg=PMMEval_MHellaswag_infer_cfg,
eval_cfg=PMMEval_MHellaswag_eval_cfg)
)
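# Illustrative sanity check (not required by OpenCompass): the loop above yields
# one entry per language code, with abbrs 'mhellaswag-en', 'mhellaswag-zh', ..., 'mhellaswag-vi'.
assert len(PMMEval_MHellaswag_datasets) == len(NATURAL_LANGUAGE_CODES)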
from mmengine.config import read_base
with read_base():
from .mifeval_gen_79f8fb import PMMEval_MIFEval_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalMIFEvalDataset, PMMEvalMIFEvalEvaluator, pmmeval_mifeval_postprocess
NATURAL_LANGUAGE_CODES = ['en', 'zh', 'ar', 'es', 'fr', 'ja', 'ko', 'pt', 'th', 'vi']
PMMEVAL_MIFEVAL_TEMPLATE = '{prompt}'
PMMEval_MIFEval_datasets = list()
PMMEval_MIFEval_reader_cfg = dict(
input_columns=['prompt', 'instruction_id_list', 'kwargs'],
output_column=None,
test_split='test'
)
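# output_column is None: MIFEval has no single gold answer; the evaluator instead verifies the constraints listed in instruction_id_list / kwargs, IFEval-style.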
PMMEval_MIFEval_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=PMMEVAL_MIFEVAL_TEMPLATE
)
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for lang_code in NATURAL_LANGUAGE_CODES:
PMMEval_MIFEval_eval_cfg = dict(
evaluator=dict(type=PMMEvalMIFEvalEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=pmmeval_mifeval_postprocess, lang_code=lang_code)
)
PMMEval_MIFEval_datasets.append(
dict(
abbr=f'mifeval-{lang_code}',
type=PMMEvalMIFEvalDataset,
path='P-MMEval',
lang=lang_code,
reader_cfg=PMMEval_MIFEval_reader_cfg,
infer_cfg=PMMEval_MIFEval_infer_cfg,
eval_cfg=PMMEval_MIFEval_eval_cfg)
)
from mmengine.config import read_base
with read_base():
from .mlogiqa_gen_36c4f9 import PMMEval_MLogiQA_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalMLogiQADataset, PMMEvalMLogiQAEvaluator, pmmeval_mlogiqa_postprocess
NATURAL_LANGUAGE_CODES = ['en', 'zh', 'ar', 'es', 'fr', 'ja', 'ko', 'pt', 'th', 'vi']
PMMEVAL_MLOGIQA_TEMPLATE = "Passage: {context}\nQuestion: {question}\nChoices:\nA.{option_1}\nB.{option_2}\nC.{option_3}\nD.{option_4}\nPlease choose the most suitable one among A, B, C and D as the answer to this question, and return it in the following JSON format:\n{'answer': '[choice]'}\nwhere [choice] must be one of A, B, C and D."
PMMEval_MLogiQA_datasets = []
PMMEval_MLogiQA_reader_cfg = dict(
input_columns=['context', 'question', 'option_1', 'option_2', 'option_3', 'option_4'],
output_column='answer',
train_split='test')
PMMEval_MLogiQA_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=PMMEVAL_MLOGIQA_TEMPLATE
)
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for lang_code in NATURAL_LANGUAGE_CODES:
PMMEval_MLogiQA_eval_cfg = dict(
evaluator=dict(type=PMMEvalMLogiQAEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=pmmeval_mlogiqa_postprocess, lang_code=lang_code))
PMMEval_MLogiQA_datasets.append(
dict(
abbr=f'mlogiqa-{lang_code}',
type=PMMEvalMLogiQADataset,
path='P-MMEval',
lang=lang_code,
reader_cfg=PMMEval_MLogiQA_reader_cfg,
infer_cfg=PMMEval_MLogiQA_infer_cfg,
eval_cfg=PMMEval_MLogiQA_eval_cfg)
)
from mmengine.config import read_base
with read_base():
from .mmmlu_gen_d5017d import PMMEval_MMMLU_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalMMMLUDataset, PMMEvalMMMLUEvaluator, pmmeval_mmmlu_postprocess
NATURAL_LANGUAGE_CODES_MMMLU = ['EN-US', 'ZH-CN', 'AR-XY', 'ES-LA', 'FR-FR', 'JA-JP', 'KO-KR', 'PT-BR', 'TH-TL', 'VI-VT']
PMMEVAL_MMMLU_TEMPLATE = "The following is a multiple-choice question. Please choose the most suitable one among A, B, C and D as the answer to this question, and return it in the following JSON format:\n{\"answer\": \"[choice]\"}\nwhere [choice] must be one of A, B, C and D.\n\n{Question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}"
PMMEval_MMMLU_datasets = []
PMMEval_MMMLU_reader_cfg = dict(
input_columns=['Question', 'A', 'B', 'C', 'D'],
output_column='Answer',
train_split='test')
PMMEval_MMMLU_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=PMMEVAL_MMMLU_TEMPLATE
)
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for lang_code in NATURAL_LANGUAGE_CODES_MMMLU:
PMMEval_MMMLU_eval_cfg = dict(
evaluator=dict(type=PMMEvalMMMLUEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=pmmeval_mmmlu_postprocess, lang_code=lang_code))
PMMEval_MMMLU_datasets.append(
dict(
abbr=f'mmmlu-{lang_code}',
type=PMMEvalMMMLUDataset,
path='P-MMEval',
lang=lang_code,
difficulty='all',
reader_cfg=PMMEval_MMMLU_reader_cfg,
infer_cfg=PMMEval_MMMLU_infer_cfg,
eval_cfg=PMMEval_MMMLU_eval_cfg)
)
from mmengine.config import read_base
with read_base():
from .flores_gen_2697d7 import PMMEval_flores_datasets
from .humanevalxl_gen_bdec92 import PMMEval_HumanEvalXL_datasets
from .mgsm_gen_679720 import PMMEval_MGSM_datasets
from .mhellaswag_gen_1a6b73 import PMMEval_MHellaswag_datasets
from .mifeval_gen_79f8fb import PMMEval_MIFEval_datasets
from .mlogiqa_gen_36c4f9 import PMMEval_MLogiQA_datasets
from .mmmlu_gen_d5017d import PMMEval_MMMLU_datasets
from .xnli_gen_973734 import PMMEval_XNLI_datasets
PMMEval_datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
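# The generator expression above walks this module's namespace and concatenates every list imported via read_base() whose name ends in '_datasets' (flores, HumanEvalXL, MGSM, MHellaswag, MIFEval, MLogiQA, MMMLU, XNLI) into a single flat PMMEval_datasets list.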
from mmengine.config import read_base
with read_base():
from .xnli_gen_973734 import PMMEval_XNLI_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.PMMEval import PMMEvalXNLIDataset, PMMEvalXNLIEvaluator, pmmeval_xnli_postprocess
NATURAL_LANGUAGE_CODES = ['en', 'zh', 'ar', 'es', 'fr', 'ja', 'ko', 'pt', 'th', 'vi']
PMMEVAL_XNLI_TEMPLATE = """Take the following as truth: {premise}
Then the following statement: \"{statement}\" is
Options:
A. true
B. inconclusive
C. false
Select the correct option from A, B, and C, and return it in the following JSON format:
{"answer": "[choice]"}
where [choice] must be one of A, B, and C."""
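# Options A/B/C correspond to the usual XNLI labels: entailment, neutral, and contradiction.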
PMMEval_XNLI_datasets = list()
PMMEval_XNLI_reader_cfg = dict(
input_columns=['premise', 'statement'],
output_column='answer',
test_split='test'
)
PMMEval_XNLI_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=PMMEVAL_XNLI_TEMPLATE
)
]
)
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for lang_code in NATURAL_LANGUAGE_CODES:
PMMEval_XNLI_eval_cfg = dict(
evaluator=dict(type=PMMEvalXNLIEvaluator),
pred_role='BOT',
pred_postprocessor=dict(type=pmmeval_xnli_postprocess, lang_code=lang_code))
PMMEval_XNLI_datasets.append(
dict(
abbr=f'xnli-{lang_code}',
type=PMMEvalXNLIDataset,
path='P-MMEval',
lang=lang_code,
reader_cfg=PMMEval_XNLI_reader_cfg,
infer_cfg=PMMEval_XNLI_infer_cfg,
eval_cfg=PMMEval_XNLI_eval_cfg)
)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets.ProteinLMBench import ProteinLMBenchDataset, ProteinLMBenchEvaluator
QUERY_TEMPLATE = "Answer the following multiple choice question. There is only one correct answer. The last line of your response should be in the format 'Answer: $LETTER' (without quotes), where LETTER is the letter among {start} through {end}.\n{question}"
# Reader configuration
reader_cfg = dict(
input_columns=['question', 'start', 'end', 'options'],
output_column='label',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=QUERY_TEMPLATE
)
], ),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=ProteinLMBenchEvaluator),
)
proteinlmbench_dataset = dict(
abbr='ProteinLMBench',
type=ProteinLMBenchDataset,
path='tsynbio/ProteinLMBench',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg
)
proteinlmbench_datasets = [proteinlmbench_dataset]
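# Minimal usage sketch (the relative file name below is hypothetical; point it at
# this config's actual path):
#
#     from mmengine.config import read_base
#     with read_base():
#         from .ProteinLMBench_gen import proteinlmbench_datasets
#
#     datasets = proteinlmbench_datasets
#     models = [...]  # plus your model configs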
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets.ProteinLMBench import ProteinLMBenchDataset
QUERY_TEMPLATE = "Answer the following multiple choice question. There is only one correct answer. The last line of your response should be in the format 'Answer: $LETTER' (without quotes), where LETTER is the letter among {start} through {end}.\n{question}"
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: {question}\n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
reader_cfg = dict(
input_columns=['question', 'start', 'end', 'options'],
output_column='label',
)
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt=QUERY_TEMPLATE),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=ProteinLMBenchDataset,
path='tsynbio/ProteinLMBench',
reader_cfg=reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
proteinlmbench_dataset = dict(
abbr='ProteinLMBench',
type=ProteinLMBenchDataset,
path='tsynbio/ProteinLMBench',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg
)
proteinlmbench_datasets = [proteinlmbench_dataset]
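# judge_cfg is left empty here; the judge model's configuration is expected to be filled in by the eval config that imports this file before the LLM-judge evaluation runs.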
from mmengine.config import read_base
with read_base():
from .PubMedQA_llmjudge_gen_f00302 import PubMedQA_datasets
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets.PubMedQA import PubMedQADataset
QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering.
Question:\n
{question}
Options:\n
{choices}
""".strip()
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: {question}\n {choices} \n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
PubMedQA_datasets = []
PubMedQA_reader_cfg = dict(
input_columns=['question', 'choices'],
output_column='label',
)
PubMedQA_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt=QUERY_TEMPLATE),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
PubMedQA_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=PubMedQADataset,
path='qiaojin/PubMedQA',
reader_cfg=PubMedQA_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
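# Two-stage flow: GenInferencer first answers QUERY_TEMPLATE; GenericLLMEvaluator then renders GRADER_TEMPLATE with the original question, the gold label, and the prediction, asks the judge for 'A' (correct) or 'B' (incorrect), and generic_llmjudge_postprocess converts those verdicts into an accuracy score.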
PubMedQA_datasets.append(
dict(
abbr='PubMedQA',
type=PubMedQADataset,
path='qiaojin/PubMedQA',
reader_cfg=PubMedQA_reader_cfg,
infer_cfg=PubMedQA_infer_cfg,
eval_cfg=PubMedQA_eval_cfg,
)
)
# QuALITY
## Introduction
The following introduction comes from the description on the [QuALITY leaderboard](https://nyu-mll.github.io/quality/):
```
QuALITY is a multiple-choice question answering dataset with context passages in English that have an average length of about 5,000 tokens.
```
The questions are divided into two difficulty levels: easy and hard.
## Official link
### Paper
[QuALITY: Question Answering with Long Input Texts, Yes!](https://arxiv.org/pdf/2112.08608.pdf)
### Repository
[nyu-mll/quality](https://github.com/nyu-mll/quality)
## Evaluation results
```
dataset version metric mode qwen1.5-7b-chat-hf qwen1.5-14b-chat-hf qwen1.5-72b-chat-hf
--------- --------- -------- ------ -------------------- --------------------- ---------------------
QuALITY ed2404 easy_acc gen 62.39 68.17 76.69
QuALITY ed2404 hard_acc gen 49.27 56.22 63.96
QuALITY ed2404 all_acc gen 54.65 60.88 68.84
```
## Reference
```
@inproceedings{pang-etal-2022-quality,
title = "{Q}u{ALITY}: Question Answering with Long Input Texts, Yes!",
author = "Pang, Richard Yuanzhe and
Parrish, Alicia and
Joshi, Nitish and
Nangia, Nikita and
Phang, Jason and
Chen, Angelica and
Padmakumar, Vishakh and
Ma, Johnny and
Thompson, Jana and
He, He and
Bowman, Samuel",
booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.naacl-main.391",
pages = "5336--5358",
abstract = "To enable building and testing models on long-document comprehension, we introduce QuALITY, a multiple-choice QA dataset with context passages in English that have an average length of about 5,000 tokens, much longer than typical current models can process. Unlike in prior work with passages, our questions are written and validated by contributors who have read the entire passage, rather than relying on summaries or excerpts. In addition, only half of the questions are answerable by annotators working under tight time constraints, indicating that skimming and simple search are not enough to consistently perform well. Our baseline models perform poorly on this task (55.4{\%}) and significantly lag behind human performance (93.5{\%}).",
}
```
from mmengine.config import read_base
with read_base():
from .QuALITY_gen_c407cb import QuALITY_datasets # noqa: F401, F403
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import QuALITYDataset, QuALITYEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess
QuALITY_reader_cfg = dict(
input_columns=['article', 'question', 'A', 'B', 'C', 'D'],
output_column='gold_label',
)
QuALITY_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
'Read the article, and answer the question.\n\nArticle:\n{article}\n\nQ: {question}\n\nA. {A}\nB. {B}\nC. {C}\nD. {D}'
),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
QuALITY_eval_cfg = dict(
evaluator=dict(type=QuALITYEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
pred_role='BOT')
QuALITY_datasets = [
dict(
abbr='QuALITY',
type=QuALITYDataset,
path='./data/QuALITY/QuALITY.v1.0.1.htmlstripped.dev',
reader_cfg=QuALITY_reader_cfg,
infer_cfg=QuALITY_infer_cfg,
eval_cfg=QuALITY_eval_cfg),
]
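# first_option_postprocess extracts the chosen option letter (one of 'ABCD') from the model's free-form reply, e.g. 'The answer is B because ...' -> 'B', before QuALITYEvaluator computes easy/hard/all accuracy against gold_label.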
# R-Bench
## Introduction
The following introduction comes from the description on the [R-Bench official website](https://evalmodels.github.io/rbench/):
```
R-Bench is a graduate-level multi-disciplinary benchmark for evaluating the complex reasoning capabilities of Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs). R stands for Reasoning.
```
R-Bench spans 19 departments, including mathematics, physics, biology, computer science, and chemistry, and covers over 100 subjects such as Inorganic Chemistry, Chemical Reaction Kinetics, and Electromagnetism. It contains 1,094 questions for testing language models and 665 questions tailored to evaluating multimodal reasoning, available in both English and Chinese.
The questions are meticulously curated for rigorous difficulty calibration, subject balance, and cross-linguistic alignment, making R-Bench an Olympiad-level multi-disciplinary benchmark.
## Official Links
### Paper
[R-Bench: Graduate-level Multi-disciplinary Benchmarks for LLM & MLLM Complex Reasoning Evaluation](https://arxiv.org/abs/2505.02018)
## Evaluation Results
### Language Model Results
```
Model Source Date Average RBench-T RBench-T (zh)
------------------------ -------------------------------------------------- ---------- ---------- ---------- ---------------
OpenAI o1 🥇 https://openai.com/o1/ 2024-12-17 69.6 69.0 70.1
Gemini2.0-Flash-Thinking 🥈 https://deepmind.google/technologies/gemini/flash-thinking/ 2025-01-21 68.0 68.4 67.5
Doubao1.5Pro 🥉 https://www.volcengine.com/product/doubao 2025-01-21 62.7 62.0 63.4
GPT-4o https://openai.com/index/hello-gpt-4o/ 2024-11-20 52.6 53.6 51.6
Claude3.5-sonnet https://www.anthropic.com/news/claude-3-5-sonnet 2024-06-20 57.4 57.5 57.3
Qwen2.5-72B https://github.com/QwenLM/Qwen2.5 2024-09-19 52.9 53.7 52.0
Qwen2.5-32B https://github.com/QwenLM/Qwen2.5 2024-09-19 50.4 50.8 49.9
Qwen2.5-7B https://github.com/QwenLM/Qwen2.5 2024-09-19 44.1 43.6 44.5
```
### Multimodal Model Results
```
Model Source Date Average RBench-M RBench-M (zh)
------------------------ -------------------------------------------------- ---------- ---------- ---------- ---------------
OpenAI o1 🥇 https://openai.com/o1/ 2024-12-17 53.1 53.2 53.0
Doubao1.5Pro 🥈 https://www.volcengine.com/product/doubao 2025-01-21 40.2 37.9 42.4
Claude-3-5-sonnet 🥉 https://www.anthropic.com/news/claude-3-5-sonnet 2025-04-10 39.0 39.7 38.3
GPT-4o https://openai.com/index/hello-gpt-4o/ 2024-11-20 33.3 33.4 33.2
Qwen2.5-72B https://github.com/QwenLM/Qwen2.5 2024-09-19 25.4 25.1 25.7
Qwen2.5-7B https://github.com/QwenLM/Qwen2.5 2024-09-19 21.0 19.6 22.3
```
Note:
- RBench-T: Text-only questions for language models test
- RBench-M: Multimodal questions for multimodal models test
- The values in the table represent the Top-1 accuracy, in %
- zh indicates the Chinese version
## Reference
```
@inproceedings{
guo2025rbench,
title={RBench: Graduate-level Multi-disciplinary Benchmarks for
LLM & MLLM Complex Reasoning Evaluation},
author={Meng-Hao Guo, Jiajun Xu, Yi Zhang, Jiaxi Song, Haoyang Peng, Yi-Xuan Deng,
Xinzhi Dong, Kiyohiro Nakayama, Zhengyang Geng, Chen Wang, Bolin Ni, Guo-Wei Yang,
Yongming Rao, Houwen Peng, Han Hu, Gordon Wetzstein, Shi-min Hu},
year={2025},
eprint={2505.02018},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2505.02018},
}
```
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import RBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess
RBench_reader_cfg = dict(input_columns=[
'RBench_Question_Input', 'RBench_Option_A', 'RBench_Option_B',
'RBench_Option_C', 'RBench_Option_D', 'RBench_Option_E', 'RBench_Option_F'
],
output_column='target')
RBench_datasets = []
system_prompt_en = "Answer the following single choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEF). Think step by step before answering."
system_prompt_zh = '回答以下单选题。答案的最后一行应采用以下格式:“答案是$LETTER”(不带引号),其中 LETTER 是选项之一(例如 ABCDEF 之一)。回答前请逐步思考。'
RBench_infer_en_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
f'{system_prompt_en}\nQuestion: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\nAnswer: '
),
], ),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
RBench_infer_zh_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
f'{system_prompt_zh}\n问题: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\n答案: '
),
], ),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
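# Both prompts above are f-strings: the doubled braces around column names escape to single braces, so the PromptTemplate later receives {RBench_Question_Input}, {RBench_Option_A}, etc. to fill per row, while the English/Chinese system prompts are substituted immediately.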
RBench_eval_cfg = dict(evaluator=dict(type=AccwithDetailsEvaluator),
pred_postprocessor=dict(type=first_option_postprocess,
options='ABCDEF'))
RBench_datasets.append(
dict(
abbr='R-Bench_en',
type=RBenchDataset,
path='R-Bench/R-Bench',
name='R-Bench',
subset='en',
reader_cfg=RBench_reader_cfg,
infer_cfg=RBench_infer_en_cfg,
eval_cfg=RBench_eval_cfg,
))
RBench_datasets.append(
dict(
abbr='R-Bench_zh',
type=RBenchDataset,
path='R-Bench/R-Bench',
name='R-Bench',
subset='zh',
reader_cfg=RBench_reader_cfg,
infer_cfg=RBench_infer_zh_cfg,
eval_cfg=RBench_eval_cfg,
))
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import RBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess
RBench_reader_cfg = dict(
input_columns=[
'RBench_Question_Input',
'RBench_Option_A',
'RBench_Option_B',
'RBench_Option_C',
'RBench_Option_D',
'RBench_Option_E',
'RBench_Option_F',
],
output_column='target',
)
RBench_datasets = []
system_prompt = "Answer the following single choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEF). Think step by step before answering."
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: {system_prompt}\nQuestion: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
RBench_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f'{system_prompt}\nQuestion: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\nAnswer: ',
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
for subset in ['en', 'zh']:
RBench_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=RBenchDataset,
path='R-Bench/R-Bench',
name='R-Bench',
subset=subset,
reader_cfg=RBench_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
RBench_datasets.append(
dict(
abbr=f'R-Bench_{subset}',
type=RBenchDataset,
path='R-Bench/R-Bench',
name='R-Bench',
subset=subset,
reader_cfg=RBench_reader_cfg,
infer_cfg=RBench_infer_cfg,
eval_cfg=RBench_eval_cfg,
)
)